define_structure :Url, :channel, :nick, :time, :url, :info
-class ::UrlLinkError < RuntimeError
-end
-
class UrlPlugin < Plugin
LINK_INFO = "[Link Info]"
OUR_UNSAFE = Regexp.new("[^#{URI::PATTERN::UNRESERVED}#{URI::PATTERN::RESERVED}%# ]", false, 'N')
:default => ['localhost', '^192\.168\.', '^10\.', '^127\.', '^172\.(1[6-9]|2\d|31)\.'],
:on_change => Proc.new { |bot, v| bot.plugins['url'].reset_no_info_hosts },
:desc => "A list of regular expressions matching hosts for which no info should be provided")
-
# Optional restrictions on when link info is volunteered: limit it to a
# whitelist of channels, and/or ignore messages from given hostmasks.
Config.register Config::ArrayValue.new('url.only_on_channels',
  :default => [],
  :desc => "Show link info only on these channels")
Config.register Config::ArrayValue.new('url.ignore',
  :default => [],
  :desc => "Don't show link info for urls from users represented as hostmasks on this list. Useful for ignoring other bots, for example.")
def initialize
super
@bot.config.items[:'url.display_link_info'].set_string(@bot.config['url.display_link_info'].to_s)
end
reset_no_info_hosts
+ self.filter_group = :htmlinfo
+ load_filters
end
def reset_no_info_hosts
# Retrieve displayable link info for +uri_str+ (a String or URI):
# the page title and, depending on configuration, first paragraph,
# content type, encoding and size, formatted for IRC output.
#
# opts:: options hash, also used as the basis for the :url_added event
#        payload; :always_reply forces a reply even for blacklisted hosts.
#
# Returns a formatted info string, nil for non-http(s) schemes or when
# url.titles_only is set and no title was found, or false when info for
# the host is disabled and :always_reply is not set.
# Raises a RuntimeError when fetching/processing the link fails.
def get_title_for_url(uri_str, opts = {})
  url = uri_str.kind_of?(URI) ? uri_str : URI.parse(uri_str)
  return if url.scheme !~ /https?/

  # Match the blacklist not only against the hostname but also against
  # its IP, canonical name and aliases, so a DNS alias can't bypass it.
  begin
    checks = TCPSocket.gethostbyname(url.host)
    checks.delete_at(-2) # drop the address-family field, keep names/addresses
  rescue => e
    return "Unable to retrieve info for #{url.host}: #{e.message}"
  end

  checks << url.host
  checks.flatten!

  unless checks.grep(@no_info_hosts).empty?
    # Only complain when explicitly asked; stay silent for passive scans.
    return ( opts[:always_reply] ? "Sorry, info retrieval for #{url.host} (#{checks.first}) is disabled" : false )
  end

  logopts = opts.dup

  title = nil
  extra = []

  begin
    debug "+ getting info for #{url.request_uri}"
    info = @bot.filter(:htmlinfo, url)
    debug info
    resp = info[:headers]

    logopts[:title] = title = info[:title]

    if info[:content]
      logopts[:extra] = info[:content]
      extra << "#{Bold}text#{Bold}: #{info[:content]}" if @bot.config['url.first_par']
    else
      # No parseable content: fall back to basic header-derived info.
      logopts[:extra] = String.new
      logopts[:extra] << "Content Type: #{resp['content-type']}"
      extra << "#{Bold}type#{Bold}: #{resp['content-type']}" unless title
      if enc = resp['content-encoding']
        logopts[:extra] << ", encoding: #{enc}"
        extra << "#{Bold}encoding#{Bold}: #{enc}" if @bot.config['url.first_par'] or not title
      end

      # Thousands-separate the byte count for readability.
      size = resp['content-length'].first.gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2') rescue nil
      if size
        logopts[:extra] << ", size: #{size} bytes"
        extra << "#{Bold}size#{Bold}: #{size} bytes" if @bot.config['url.first_par'] or not title
      end
    end
  rescue => e
    # FIX: the previous code rescued Exception and re-raised only for
    # UrlLinkError, silently swallowing every other error. That hid
    # failures from the caller (handle_urls), whose 404-retry and error
    # reporting parse the message of the exception raised here. Wrap and
    # re-raise every error instead, preserving the original message.
    debug e
    raise "connecting to site/processing information (#{e.message})"
  end

  call_event(:url_added, url.to_s, logopts)
  if title
    extra.unshift("#{Bold}title#{Bold}: #{title}")
  end
  return extra.join(", ") if title or not @bot.config['url.titles_only']
end
- def handle_urls(m, urls, display_info=@bot.config['url.display_link_info'])
+ def handle_urls(m, params={})
+ opts = {
+ :display_info => @bot.config['url.display_link_info'],
+ :channels => @bot.config['url.only_on_channels'],
+ :ignore => @bot.config['url.ignore']
+ }.merge params
+ urls = opts[:urls]
+ display_info= opts[:display_info]
+ channels = opts[:channels]
+ ignore = opts[:ignore]
+
+ unless channels.empty?
+ return unless channels.map { |c| c.downcase }.include?(m.channel.downcase)
+ end
+
+ ignore.each { |u| return if m.source.matches?(u) }
+
return if urls.empty?
debug "found urls #{urls.inspect}"
list = m.public? ? @registry[m.target] : nil
urls_displayed = 0
urls.each do |urlstr|
debug "working on #{urlstr}"
- next unless urlstr =~ /^https?:/
+ next unless urlstr =~ /^https?:\/\/./
title = nil
debug "Getting title for #{urlstr}..."
+ reply = nil
begin
title = get_title_for_url(urlstr,
+ :always_reply => m.address?,
:nick => m.source.nick,
:channel => m.channel,
:ircline => m.message)
debug "Title #{title ? '' : 'not '} found"
+ reply = "#{LINK_INFO} #{title}" if title
rescue => e
- m.reply "Error #{e.message}"
+ debug e
+ # we might get a 404 because of trailing punctuation, so we try again
+ # with the last character stripped. this might generate invalid URIs
+ # (e.g. because "some.url" gets chopped to some.url%2, so catch that too
+ if e.message =~ /\(404 - Not Found\)/i or e.kind_of?(URI::InvalidURIError)
+ # chop off last character, and retry if we still have enough string to
+ # look like a minimal URL
+ retry if urlstr.chop! and urlstr =~ /^https?:\/\/./
+ end
+ reply = "Error #{e.message}"
end
if display_info > urls_displayed
- if title
- m.reply("#{LINK_INFO} #{title}", :overlong => :truncate)
+ if reply
+ m.reply reply, :overlong => :truncate, :to => :public,
+ :nick => (m.address? ? :auto : false)
urls_displayed += 1
end
end
# Handler for the explicit "url info" style command: extract every URL
# from the given parameter text and report link info for each, ignoring
# the url.only_on_channels restriction. The work runs in a background
# thread so the bot's main loop is not blocked by network I/O.
def info(m, params)
  escaped = URI.escape(params[:urls].to_s, OUR_UNSAFE)
  found = URI.extract(escaped)
  request = {
    :urls => found,
    :display_info => params[:urls].length,
    :channels => []
  }
  Thread.new { handle_urls(m, request) }
end
# Passive listener: scan every message that does not address the bot for
# http(s) URLs and, when any are found, hand them to handle_urls in a
# background thread (which applies channel/hostmask restrictions).
def message(m)
  return if m.address?

  escaped = URI.escape(m.message, OUR_UNSAFE)
  candidates = URI.extract(escaped, ['http', 'https'])
  unless candidates.empty?
    Thread.new { handle_urls(m, :urls => candidates) }
  end
end
def reply_urls(opts={})