X-Git-Url: https://git.netwichtig.de/gitweb/?a=blobdiff_plain;f=data%2Frbot%2Fplugins%2Furl.rb;h=d08c24e837fea4fe2bb88b6829a7ba5e322fef8a;hb=1e841175468b3e0357ab278a226a237fe4d7687e;hp=0809288f3c260ba5c8558dc1118e824552e3fee6;hpb=2da3a85740963a5dc4e9390115e13139f97511e2;p=user%2Fhenk%2Fcode%2Fruby%2Frbot.git

diff --git a/data/rbot/plugins/url.rb b/data/rbot/plugins/url.rb
index 0809288f..d08c24e8 100644
--- a/data/rbot/plugins/url.rb
+++ b/data/rbot/plugins/url.rb
@@ -5,9 +5,6 @@
 
 define_structure :Url, :channel, :nick, :time, :url, :info
 
-class ::UrlLinkError < RuntimeError
-end
-
 class UrlPlugin < Plugin
   LINK_INFO = "[Link Info]"
   OUR_UNSAFE = Regexp.new("[^#{URI::PATTERN::UNRESERVED}#{URI::PATTERN::RESERVED}%# ]", false, 'N')
@@ -60,82 +57,52 @@ class UrlPlugin < Plugin
     url = uri_str.kind_of?(URI) ? uri_str : URI.parse(uri_str)
     return if url.scheme !~ /https?/
 
-    if url.host =~ @no_info_hosts
-      return "Sorry, info retrieval for #{url.host} is disabled"
+    # also check the IP, the canonical name and the aliases
+    begin
+      checks = TCPSocket.gethostbyname(url.host)
+      checks.delete_at(-2)
+    rescue => e
+      return "Unable to retrieve info for #{url.host}: #{e.message}"
+    end
+
+    checks << url.host
+    checks.flatten!
+
+    unless checks.grep(@no_info_hosts).empty?
+      return "Sorry, info retrieval for #{url.host} (#{checks.first}) is disabled"
     end
 
     logopts = opts.dup
 
     title = nil
-    extra = String.new
+    extra = []
 
     begin
-      debug "+ getting #{url.request_uri}"
-      @bot.httputil.get_response(url) { |resp|
-        case resp
-        when Net::HTTPSuccess
-
-          debug resp.to_hash
-
-          if resp['content-type'] =~ /^text\/|(?:x|ht)ml/
-            # The page is text or HTML, so we can try finding a title and, if
-            # requested, the first par.
-            #
-            # We act differently depending on whether we want the first par or
-            # not: in the first case we download the initial part and then parse
-            # it; in the second case we only download as much as we need to find
-            # the title
-            #
-            if @bot.config['url.first_par']
-              partial = resp.partial_body(@bot.config['http.info_bytes'])
-              logopts[:title] = title = get_title_from_html(partial)
-              if url.fragment and not url.fragment.empty?
-                fragreg = /.*?<a\s+[^>]*name=["']?#{url.fragment}["']?.*?>/im
-                partial.sub!(fragreg,'')
-              end
-              first_par = Utils.ircify_first_html_par(partial, :strip => title)
-              unless first_par.empty?
-                logopts[:extra] = first_par
-                extra << ", #{Bold}text#{Bold}: #{first_par}"
-              end
-              call_event(:url_added, url.to_s, logopts)
-              return "#{Bold}title#{Bold}: #{title}#{extra}" if title
-            else
-              resp.partial_body(@bot.config['http.info_bytes']) { |part|
-                logopts[:title] = title = get_title_from_html(part)
-                call_event(:url_added, url.to_s, logopts)
-                return "#{Bold}title#{Bold}: #{title}" if title
-              }
-            end
-            # if nothing was found, provide more basic info, as for non-html pages
-          else
-            resp.no_cache = true
-          end
+      debug "+ getting info for #{url.request_uri}"
+      info = @bot.filter(:htmlinfo, url)
+      debug info
+      resp = info[:headers]
 
-          enc = resp['content-encoding']
-          logopts[:extra] = String.new
-          logopts[:extra] << "Content Type: #{resp['content-type']}"
-          if enc
-            logopts[:extra] << ", encoding: #{enc}"
-            extra << ", #{Bold}encoding#{Bold}: #{enc}"
-          end
+      logopts[:title] = title = info[:title]
 
-          unless @bot.config['url.titles_only']
-            # content doesn't have title, just display info.
-            size = resp['content-length'].gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2') rescue nil
-            if size
-              logopts[:extra] << ", size: #{size} bytes"
-              size = ", #{Bold}size#{Bold}: #{size} bytes"
-            end
-            call_event(:url_added, url.to_s, logopts)
-            return "#{Bold}type#{Bold}: #{resp['content-type']}#{size}#{extra}"
-          end
-          call_event(:url_added, url.to_s, logopts)
-        else
-          raise UrlLinkError, "getting link (#{resp.code} - #{resp.message})"
+      if info[:content]
+        logopts[:extra] = info[:content]
+        extra << "#{Bold}text#{Bold}: #{info[:content]}" if @bot.config['url.first_par']
+      else
+        logopts[:extra] = String.new
+        logopts[:extra] << "Content Type: #{resp['content-type']}"
+        extra << "#{Bold}type#{Bold}: #{resp['content-type']}" unless title
+        if enc = resp['content-encoding']
+          logopts[:extra] << ", encoding: #{enc}"
+          extra << "#{Bold}encoding#{Bold}: #{enc}" if @bot.config['url.first_par'] or not title
+        end
+
+        size = resp['content-length'].first.gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2') rescue nil
+        if size
+          logopts[:extra] << ", size: #{size} bytes"
+          extra << "#{Bold}size#{Bold}: #{size} bytes" if @bot.config['url.first_par'] or not title
         end
-      }
-      return nil
+      end
     rescue Exception => e
       case e
       when UrlLinkError
@@ -145,6 +112,12 @@ class UrlPlugin < Plugin
         raise "connecting to site/processing information (#{e.message})"
       end
     end
+
+    call_event(:url_added, url.to_s, logopts)
+    if title
+      extra.unshift("#{Bold}title#{Bold}: #{title}")
+    end
+    return extra.join(", ") if title or not @bot.config['url.titles_only']
   end
 
   def handle_urls(m, urls, display_info=@bot.config['url.display_link_info'])
@@ -158,19 +131,30 @@ class UrlPlugin < Plugin
       next unless urlstr =~ /^https?:/
       title = nil
       debug "Getting title for #{urlstr}..."
+      reply = nil
       begin
         title = get_title_for_url(urlstr,
                                   :nick => m.source.nick,
                                   :channel => m.channel,
                                   :ircline => m.message)
         debug "Title #{title ? '' : 'not '} found"
+        reply = "#{LINK_INFO} #{title}" if title
       rescue => e
-        m.reply "Error #{e.message}"
+        debug e
+        # we might get a 404 because of trailing punctuation, so we try again
+        # with the last character stripped. this might generate invalid URIs
+        # (e.g. because "some.url" gets chopped to some.url%2), so catch that too
+        if e.message =~ /\(404 - Not Found\)/i or e.kind_of?(URI::InvalidURIError)
+          # chop off last character, and retry if we still have enough string to
+          # look like a minimal URL
+          retry if urlstr.chop! and urlstr =~ /^https?:\/\/./
+        end
+        reply = "Error #{e.message}"
      end
 
       if display_info > urls_displayed
-        if title
-          m.reply("#{LINK_INFO} #{title}", :overlong => :truncate)
+        if reply
+          m.plainreply(reply, :overlong => :truncate)
           urls_displayed += 1
         end
       end
@@ -201,7 +185,8 @@ class UrlPlugin < Plugin
     return if m.address?
 
     escaped = URI.escape(m.message, OUR_UNSAFE)
-    urls = URI.extract(escaped)
+    urls = URI.extract(escaped, ['http', 'https'])
+    return if urls.empty?
     Thread.new {
       handle_urls(m, urls)
     }
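A note on the new blocklist check in get_title_for_url: TCPSocket.gethostbyname returns an array of the form [canonical_name, [alias, ...], address_family, address, ...], so checks.delete_at(-2) strips the address-family integer and leaves only names and addresses for the url.no_info_hosts pattern to match against. (TCPSocket.gethostbyname is deprecated in current Ruby, but was the ordinary API when this commit was written.) A minimal standalone sketch of the same logic, assuming a Regexp blocklist like the plugin's @no_info_hosts; the helper name and sample pattern below are illustrative, not part of the plugin:

    require 'socket'

    # True if the host, its canonical name, any alias or any address
    # matches the blocklist regexp. Like the plugin, this assumes
    # gethostbyname returned a single address, so that delete_at(-2)
    # removes exactly the address-family integer.
    def info_disabled?(host, blocklist)
      checks = TCPSocket.gethostbyname(host)
      checks.delete_at(-2)   # drop the address family (e.g. 2 for AF_INET)
      checks << host
      checks.flatten!        # aliases come back as a nested array
      not checks.grep(blocklist).empty?
    end

    info_disabled?('localhost', /^localhost$|^127\./)   #=> true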
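The heart of the change is that the plugin no longer fetches and parses pages itself: the old Net::HTTP response handling, partial-body title scraping and UrlLinkError plumbing are all replaced by one call to the shared :htmlinfo filter. Judging only from what the new code reads out of it, the filter returns a hash shaped roughly like this (the field names come from the diff; the comments are inference, not documented API):

    info = @bot.filter(:htmlinfo, url)
    info[:title]    # page title, or nil when none was found
    info[:content]  # ircified first paragraph, when one could be extracted
    info[:headers]  # response headers; each value is apparently wrapped in
                    # an array, which is why the new code reads
                    # resp['content-length'].first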
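The Content-Length formatting line survives the rewrite almost unchanged, gaining only the .first call. Its regexp inserts thousands separators: the lookahead (?=\d{3}+(?:\.|$)) fires on any digit followed by a whole number of three-digit groups ending at a decimal point or at the end of the string, and the trailing rescue nil swallows the error when the header is missing. Two quick examples of its behaviour:

    '1234567'.gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/, '\1,\2')
    #=> "1,234,567"
    '10485.76'.gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/, '\1,\2')
    #=> "10,485.76"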
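In the final hunk, URI.extract is now told to pick out only http and https URLs, so schemes like ftp or mailto no longer spawn a worker thread, and the new early return skips thread creation entirely when nothing matched. (URI.escape and URI.extract were the ordinary API on the Rubies rbot targeted; both are deprecated or removed in modern Ruby.) Note that extraction still keeps trailing punctuation, since characters such as ',' and ')' are legal in URIs:

    require 'uri'

    URI.extract('see http://example.com/page, or ftp://example.org/f',
                ['http', 'https'])
    #=> ["http://example.com/page,"]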
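That trailing punctuation is exactly what the new rescue clause in handle_urls repairs: on a 404, or on a URI::InvalidURIError caused by a half-chopped escape like %2, it chops one character off the end and retries until the fetch succeeds or the string stops looking like a URL. A compressed, self-contained sketch of the control flow; fetch_title is a hypothetical stand-in for get_title_for_url, and unlike the plugin this sketch retries on any error, not just 404s and invalid URIs:

    # stand-in fetcher: fails while the URL ends in stray punctuation
    def fetch_title(urlstr)
      raise "(404 - Not Found)" if urlstr =~ /[),.]$/
      "title of #{urlstr}"
    end

    def title_with_retry(urlstr)
      begin
        fetch_title(urlstr)
      rescue => e
        # String#chop! mutates urlstr and returns nil once the string is
        # empty, so this retry loop always terminates
        retry if urlstr.chop! and urlstr =~ %r{^https?://.}
        raise
      end
    end

    title_with_retry('http://example.com/page).')
    #=> "title of http://example.com/page"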