X-Git-Url: https://git.netwichtig.de/gitweb/?a=blobdiff_plain;f=data%2Frbot%2Fplugins%2Furl.rb;h=2cee5f4f5d37b2d1443b16355fe9ae60c086fc20;hb=62f5460b315213fe6e1d738a3dbc5ffbb7d6ce87;hp=ca1e6ed86225e6423e865e1602108626771cf343;hpb=4ee4a465fd25bd0d7e353f0f9e5b69fe165b3b30;p=user%2Fhenk%2Fcode%2Fruby%2Frbot.git diff --git a/data/rbot/plugins/url.rb b/data/rbot/plugins/url.rb index ca1e6ed8..2cee5f4f 100644 --- a/data/rbot/plugins/url.rb +++ b/data/rbot/plugins/url.rb @@ -1,149 +1,218 @@ -require 'uri' +#-- vim:sw=2:et +#++ +# +# :title: Url plugin -Url = Struct.new("Url", :channel, :nick, :time, :url) -TITLE_RE = /<\s*?title\s*?>(.+?)<\s*?\/title\s*?>/im +define_structure :Url, :channel, :nick, :time, :url, :info class UrlPlugin < Plugin - BotConfig.register BotConfigIntegerValue.new('url.max_urls', + LINK_INFO = "[Link Info]" + OUR_UNSAFE = Regexp.new("[^#{URI::PATTERN::UNRESERVED}#{URI::PATTERN::RESERVED}%# ]", false, 'N') + + Config.register Config::IntegerValue.new('url.max_urls', :default => 100, :validate => Proc.new{|v| v > 0}, :desc => "Maximum number of urls to store. New urls replace oldest ones.") - BotConfig.register BotConfigBooleanValue.new('url.display_link_info', + Config.register Config::IntegerValue.new('url.display_link_info', + :default => 0, + :desc => "Get the title of links pasted to the channel and display it (also tells if the link is broken or the site is down). Do it for at most this many links per line (set to 0 to disable)") + Config.register Config::BooleanValue.new('url.titles_only', + :default => false, + :desc => "Only show info for links that have tags (in other words, don't display info for jpegs, mpegs, etc.)") + Config.register Config::BooleanValue.new('url.first_par', + :default => false, + :desc => "Also try to get the first paragraph of a web page") + Config.register Config::BooleanValue.new('url.info_on_list', :default => false, - :desc => "Get the title of any links pasted to the channel and display it (also tells if the link is broken or the site is down)") + :desc => "Show link info when listing/searching for urls") + Config.register Config::ArrayValue.new('url.no_info_hosts', + :default => ['localhost', '^192\.168\.', '^10\.', '^127\.', '^172\.(1[6-9]|2\d|31)\.'], + :on_change => Proc.new { |bot, v| bot.plugins['url'].reset_no_info_hosts }, + :desc => "A list of regular expressions matching hosts for which no info should be provided") + def initialize super @registry.set_default(Array.new) + unless @bot.config['url.display_link_info'].kind_of?(Integer) + @bot.config.items[:'url.display_link_info'].set_string(@bot.config['url.display_link_info'].to_s) + end + reset_no_info_hosts + end + + def reset_no_info_hosts + @no_info_hosts = Regexp.new(@bot.config['url.no_info_hosts'].join('|'), true) + debug "no info hosts regexp set to #{@no_info_hosts}" end def help(plugin, topic="") - "urls [<max>=4] => list <max> last urls mentioned in current channel, urls search [<max>=4] <regexp> => search for matching urls. In a private message, you must specify the channel to query, eg. urls <channel> [max], urls search <channel> [max] <regexp>" + "url info <url> => display link info for <url> (set url.display_link_info > 0 if you want the bot to do it automatically when someone writes an url), urls [<max>=4] => list <max> last urls mentioned in current channel, urls search [<max>=4] <regexp> => search for matching urls. In a private message, you must specify the channel to query, eg. 
urls <channel> [max], urls search <channel> [max] <regexp>" end def get_title_from_html(pagedata) - return unless TITLE_RE.match(pagedata) - title = $1.strip.gsub(/\s*\n+\s*/, " ") - title = Utils.decode_html_entities title - title = title[0..255] if title.length > 255 - "[Link Info] title: #{title}" + return pagedata.ircify_html_title end - def read_data_from_response(response, amount) + def get_title_for_url(uri_str, opts = {}) - amount_read = 0 - chunks = [] + url = uri_str.kind_of?(URI) ? uri_str : URI.parse(uri_str) + return if url.scheme !~ /https?/ - response.read_body do |chunk| # read body now + # also check the ip, the canonical name and the aliases + begin + checks = TCPSocket.gethostbyname(url.host) + checks.delete_at(-2) + rescue => e + return "Unable to retrieve info for #{url.host}: #{e.message}" + end - amount_read += chunk.length + checks << url.host + checks.flatten! - if amount_read > amount - amount_of_overflow = amount_read - amount - chunk = chunk[0...-amount_of_overflow] - end + unless checks.grep(@no_info_hosts).empty? + return "Sorry, info retrieval for #{url.host} (#{checks.first}) is disabled" + end - chunks << chunk + logopts = opts.dup - break if amount_read >= amount + title = nil + extra = [] - end + begin + debug "+ getting info for #{url.request_uri}" + info = @bot.filter(:htmlinfo, url) + debug info + resp = info[:headers] - chunks.join('') + logopts[:title] = title = info[:title] - end + if info[:content] + logopts[:extra] = info[:content] + extra << "#{Bold}text#{Bold}: #{info[:content]}" if @bot.config['url.first_par'] + else + logopts[:extra] = String.new + logopts[:extra] << "Content Type: #{resp['content-type']}" + extra << "#{Bold}type#{Bold}: #{resp['content-type']}" unless title + if enc = resp['content-encoding'] + logopts[:extra] << ", encoding: #{enc}" + extra << "#{Bold}encoding#{Bold}: #{enc}" if @bot.config['url.first_par'] or not title + end - def get_title_for_url(uri_str, depth=@bot.config['http.max_redir']) - # This god-awful mess is what the ruby http library has reduced me to. - # Python's HTTP lib is so much nicer. :~( + size = resp['content-length'].first.gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2') rescue nil + if size + logopts[:extra] << ", size: #{size} bytes" + extra << "#{Bold}size#{Bold}: #{size} bytes" if @bot.config['url.first_par'] or not title + end + end + rescue Exception => e + case e + when UrlLinkError + raise e + else + error e + raise "connecting to site/processing information (#{e.message})" + end + end - if depth == 0 - raise "Error: Maximum redirects hit." + call_event(:url_added, url.to_s, logopts) + if title + extra.unshift("#{Bold}title#{Bold}: #{title}") end + return extra.join(", ") if title or not @bot.config['url.titles_only'] + end - debug "+ Getting #{uri_str.to_s}" - url = uri_str.kind_of?(URI) ? uri_str : URI.parse(uri_str) - return if url.scheme !~ /https?/ + def handle_urls(m, urls, display_info=@bot.config['url.display_link_info']) + return if urls.empty? + debug "found urls #{urls.inspect}" + list = m.public? ? @registry[m.target] : nil + debug "display link info: #{display_info}" + urls_displayed = 0 + urls.each do |urlstr| + debug "working on #{urlstr}" + next unless urlstr =~ /^https?:/ + title = nil + debug "Getting title for #{urlstr}..." + reply = nil + begin + title = get_title_for_url(urlstr, + :nick => m.source.nick, + :channel => m.channel, + :ircline => m.message) + debug "Title #{title ? 
'' : 'not '} found" + reply = "#{LINK_INFO} #{title}" if title + rescue => e + debug e + # we might get a 404 because of trailing punctuation, so we try again + # with the last character stripped. this might generate invalid URIs + # (e.g. because "some.url" gets chopped to some.url%2, so catch that too + if e.message =~ /\(404 - Not Found\)/i or e.kind_of?(URI::InvalidURIError) + # chop off last character, and retry if we still have enough string to + # look like a minimal URL + retry if urlstr.chop! and urlstr =~ /^https?:\/\/./ + end + reply = "Error #{e.message}" + end - title = nil + if display_info > urls_displayed + if reply + m.plainreply(reply, :overlong => :truncate) + urls_displayed += 1 + end + end + + next unless list + + # check to see if this url is already listed + next if list.find {|u| u.url == urlstr } + + url = Url.new(m.target, m.sourcenick, Time.new, urlstr, title) + debug "#{list.length} urls so far" + list.pop if list.length > @bot.config['url.max_urls'] + debug "storing url #{url.url}" + list.unshift url + debug "#{list.length} urls now" + end + @registry[m.target] = list + end - debug "+ connecting to #{url.host}:#{url.port}" - http = @bot.httputil.get_proxy(url) - http.start { |http| - - http.request_get(url.request_uri(), @bot.httputil.headers) { |response| - - case response - when Net::HTTPRedirection - # call self recursively if this is a redirect - redirect_to = response['location'] || '/' - debug "+ redirect location: #{redirect_to.inspect}" - url = URI.join(url.to_s, redirect_to) - debug "+ whee, redirecting to #{url.to_s}!" - return get_title_for_url(url, depth-1) - when Net::HTTPSuccess - if response['content-type'] =~ /^text\// - # since the content is 'text/*' and is small enough to - # be a webpage, retrieve the title from the page - debug "+ getting #{url.request_uri}" - # was 5*10^4 ... seems to much to me ... 4k should be enough for everybody ;) - data = read_data_from_response(response, 4096) - return get_title_from_html(data) - else - # content doesn't have title, just display info. - size = response['content-length'].gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2') - size = size ? ", size: #{size} bytes" : "" - return "[Link Info] type: #{response['content-type']}#{size}" - end - else - return "[Link Info] Error getting link (#{response.code} - #{response.message})" - end # end of "case response" - - } # end of request block - } # end of http start block - - return title - - rescue SocketError => e - return "[Link Info] Error connecting to site (#{e.message})" + def info(m, params) + escaped = URI.escape(params[:urls].to_s, OUR_UNSAFE) + urls = URI.extract(escaped) + Thread.new { handle_urls(m, urls, params[:urls].length) } end - def listen(m) - return unless m.kind_of?(PrivMessage) + def message(m) return if m.address? - # TODO support multiple urls in one line - if m.message =~ /(f|ht)tps?:\/\// - if m.message =~ /((f|ht)tps?:\/\/.*?)(?:\s+|$)/ - urlstr = $1 - list = @registry[m.target] - - if @bot.config['url.display_link_info'] - debug "Getting title for #{urlstr}..." - begin - title = get_title_for_url urlstr - if title - m.reply title - debug "Title found!" - else - debug "Title not found!" - end - rescue => e - debug "Failed: #{e}" - end - end - # check to see if this url is already listed - return if list.find {|u| u.url == urlstr } + escaped = URI.escape(m.message, OUR_UNSAFE) + urls = URI.extract(escaped, ['http', 'https']) + return if urls.empty? 
+ Thread.new { handle_urls(m, urls) } + end - url = Url.new(m.target, m.sourcenick, Time.new, urlstr) - debug "#{list.length} urls so far" - if list.length > @bot.config['url.max_urls'] - list.pop + def reply_urls(opts={}) + list = opts[:list] + max = opts[:max] + channel = opts[:channel] + m = opts[:msg] + return unless list and max and m + list[0..(max-1)].each do |url| + disp = "[#{url.time.strftime('%Y/%m/%d %H:%M:%S')}] <#{url.nick}> #{url.url}" + if @bot.config['url.info_on_list'] + title = url.info || + get_title_for_url(url.url, + :nick => url.nick, :channel => channel) rescue nil + # If the url info was missing and we now have some, try to upgrade it + if channel and title and not url.info + ll = @registry[channel] + debug ll + if el = ll.find { |u| u.url == url.url } + el.info = title + @registry[channel] = ll + end end - debug "storing url #{url.url}" - list.unshift url - debug "#{list.length} urls now" - @registry[m.target] = list + disp << " --> #{title}" if title end + m.reply disp, :overlong => :truncate end end @@ -156,9 +225,7 @@ class UrlPlugin < Plugin if list.empty? m.reply "no urls seen yet for channel #{channel}" else - list[0..(max-1)].each do |url| - m.reply "[#{url.time.strftime('%Y/%m/%d %H:%M:%S')}] <#{url.nick}> #{url.url}" - end + reply_urls :msg => m, :channel => channel, :list => list, :max => max end end @@ -170,18 +237,20 @@ class UrlPlugin < Plugin max = 1 if max < 1 regex = Regexp.new(string, Regexp::IGNORECASE) list = @registry[channel].find_all {|url| - regex.match(url.url) || regex.match(url.nick) + regex.match(url.url) || regex.match(url.nick) || + (@bot.config['url.info_on_list'] && regex.match(url.info)) } if list.empty? m.reply "no matches for channel #{channel}" else - list[0..(max-1)].each do |url| - m.reply "[#{url.time.strftime('%Y/%m/%d %H:%M:%S')}] <#{url.nick}> #{url.url}" - end + reply_urls :msg => m, :channel => channel, :list => list, :max => max end end end + plugin = UrlPlugin.new +plugin.map 'urls info *urls', :action => 'info' +plugin.map 'url info *urls', :action => 'info' plugin.map 'urls search :channel :limit :string', :action => 'search', :defaults => {:limit => 4}, :requirements => {:limit => /^\d+$/},
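
For reference, a minimal standalone sketch of the URL-extraction step the reworked message handler relies on: the incoming line is percent-escaped with a character class built from URI::PATTERN (OUR_UNSAFE in the plugin), URI.extract then pulls out the http/https links, and those are handed to handle_urls in a background thread. The constant name UNSAFE and the sample input below are illustrative only; URI.escape and URI.extract are the stdlib calls the diff itself uses, though both are deprecated or removed in current Ruby releases.

require 'uri'

# Escape anything outside the RFC 2396 reserved/unreserved sets (plus '%',
# '#' and space), mirroring the plugin's OUR_UNSAFE regexp, so stray bytes
# in an IRC line do not trip up URI.extract.
UNSAFE = Regexp.new("[^#{URI::PATTERN::UNRESERVED}#{URI::PATTERN::RESERVED}%# ]", false)

line    = 'check http://example.com/page and https://example.org today'  # illustrative input
escaped = URI.escape(line, UNSAFE)
urls    = URI.extract(escaped, ['http', 'https'])

# The plugin processes the extracted links asynchronously so title lookups
# never block the IRC event loop, roughly:
#   Thread.new { handle_urls(m, urls) }
urls.each { |u| puts u }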