5 # :title: rbot utilities provider
7 # Author:: Tom Gilbert <tom@linuxbrit.co.uk>
8 # Author:: Giuseppe "Oblomov" Bilotta <giuseppe.bilotta@gmail.com>
10 # TODO some of these Utils should be rewritten as extensions to the appropriate
11 # standard Ruby classes and accordingly be moved to extends.rb
16 # Try to load htmlentities, fall back to an HTML escape table.
18 require 'htmlentities'
111 AFTER_PAR_PATH = /^(?:div|span)$/
112 AFTER_PAR_EX = /^(?:td|tr|tbody|table)$/
113 AFTER_PAR_CLASS = /body|message|text/i
119 # Some regular expressions to manage HTML data
122 TITLE_REGEX = /<\s*?title\s*?>(.+?)<\s*?\/title\s*?>/im
125 HX_REGEX = /<h(\d)(?:\s+[^>]*)?>(.*?)<\/h\1>/im
127 PAR_REGEX = /<p(?:\s+[^>]*)?>.*?<\/?(?:p|div|html|body|table|td|tr)(?:\s+[^>]*)?>/im
129 # Some blogging and forum platforms use spans or divs with a 'body' or 'message' or 'text' in their class
130 # to mark actual text
131 AFTER_PAR1_REGEX = /<\w+\s+[^>]*(?:body|message|text|post)[^>]*>.*?<\/?(?:p|div|html|body|table|td|tr)(?:\s+[^>]*)?>/im
133 # At worst, we can try stuff which is comprised between two <br>
134 AFTER_PAR2_REGEX = /<br(?:\s+[^>]*)?\/?>.*?<\/?(?:br|p|div|html|body|table|td|tr)(?:\s+[^>]*)?\/?>/im
141 # Miscellaneous useful functions
146 SEC_PER_HR = SEC_PER_MIN * 60
148 SEC_PER_DAY = SEC_PER_HR * 24
150 SEC_PER_WK = SEC_PER_DAY * 7
151 # Seconds per (30-day) month
152 SEC_PER_MNTH = SEC_PER_DAY * 30
153 # Second per (non-leap) year
154 SEC_PER_YR = SEC_PER_DAY * 365
156 # Auxiliary method needed by Utils.secs_to_string
157 def Utils.secs_to_string_case(array, var, string, plural)
160 array << "1 #{string}"
162 array << "#{var} #{plural}"
166 # Turn a number of seconds into a human readable string, e.g
167 # 2 days, 3 hours, 18 minutes and 10 seconds
168 def Utils.secs_to_string(secs)
170 years, secs = secs.divmod SEC_PER_YR
171 secs_to_string_case(ret, years, _("year"), _("years")) if years > 0
172 months, secs = secs.divmod SEC_PER_MNTH
173 secs_to_string_case(ret, months, _("month"), _("months")) if months > 0
174 days, secs = secs.divmod SEC_PER_DAY
175 secs_to_string_case(ret, days, _("day"), _("days")) if days > 0
176 hours, secs = secs.divmod SEC_PER_HR
177 secs_to_string_case(ret, hours, _("hour"), _("hours")) if hours > 0
178 mins, secs = secs.divmod SEC_PER_MIN
179 secs_to_string_case(ret, mins, _("minute"), _("minutes")) if mins > 0
181 secs_to_string_case(ret, secs, _("second"), _("seconds")) if secs > 0 or ret.empty?
184 raise "Empty ret array!"
188 return [ret[0, ret.length-1].join(", ") , ret[-1]].join(_(" and "))
192 # Turn a number of seconds into a hours:minutes:seconds e.g.
193 # 3:18:10 or 5'12" or 7s
195 def Utils.secs_to_short(seconds)
196 secs = seconds.to_i # make sure it's an integer
197 mins, secs = secs.divmod 60
198 hours, mins = mins.divmod 60
200 return ("%s:%s:%s" % [hours, mins, secs])
202 return ("%s'%s\"" % [mins, secs])
204 return ("%ss" % [secs])
208 # Returns human readable time.
212 # :start_date, sets the time to measure against, defaults to now
213 # :date_format, used with <tt>to_formatted_s</tt>, defaults to :default
214 def Utils.timeago(time, options = {})
215 start_date = options.delete(:start_date) || Time.new
216 date_format = options.delete(:date_format) || "%x"
217 delta = (start_date - time).round
221 distance = Utils.age_string(delta)
223 _("%{d} from now") % {:d => distance}
225 _("%{d} ago") % {:d => distance}
230 # Converts age in seconds to "nn units". Inspired by previous attempts
231 # but also gitweb's age_string() sub
232 def Utils.age_string(secs)
235 Utils.age_string(-secs)
236 when secs > 2*SEC_PER_YR
237 _("%{m} years") % { :m => secs/SEC_PER_YR }
238 when secs > 2*SEC_PER_MNTH
239 _("%{m} months") % { :m => secs/SEC_PER_MNTH }
240 when secs > 2*SEC_PER_WK
241 _("%{m} weeks") % { :m => secs/SEC_PER_WK }
242 when secs > 2*SEC_PER_DAY
243 _("%{m} days") % { :m => secs/SEC_PER_DAY }
244 when secs > 2*SEC_PER_HR
245 _("%{m} hours") % { :m => secs/SEC_PER_HR }
246 when (20*SEC_PER_MIN..40*SEC_PER_MIN).include?(secs)
248 when (50*SEC_PER_MIN..70*SEC_PER_MIN).include?(secs)
249 # _("about one hour")
251 when (80*SEC_PER_MIN..100*SEC_PER_MIN).include?(secs)
252 _("an hour and a half")
253 when secs > 2*SEC_PER_MIN
254 _("%{m} minutes") % { :m => secs/SEC_PER_MIN }
256 _("%{m} seconds") % { :m => secs }
262 # Execute an external program, returning a String obtained by redirecting
263 # the program's standards errors and output
265 # TODO: find a way to expose some common errors (e.g. Errno::NOENT)
267 def Utils.safe_exec(command, *args)
268 output = IO.popen("-") { |p|
270 break p.readlines.join("\n")
273 $stderr.reopen($stdout)
275 rescue Exception => e
276 puts "exception #{e.pretty_inspect} trying to run #{command}"
279 puts "exec of #{command} failed"
283 raise "safe execution of #{command} returned #{$?}" unless $?.success?
287 # Try executing an external program, returning true if the run was successful
288 # and false otherwise
289 def Utils.try_exec(command, *args)
293 $stderr.reopen($stdout)
295 rescue Exception => e
308 # Decode HTML entities in the String _str_, using HTMLEntities if the
309 # package was found, or UNESCAPE_TABLE otherwise.
312 if defined? ::HTMLEntities
313 if ::HTMLEntities.respond_to? :decode_entities
314 def Utils.decode_html_entities(str)
315 return HTMLEntities.decode_entities(str)
318 @@html_entities = HTMLEntities.new
319 def Utils.decode_html_entities(str)
320 return @@html_entities.decode str
324 def Utils.decode_html_entities(str)
325 return str.gsub(/(&(.+?);)/) {
327 # remove the 0-paddng from unicode integers
329 when /^#x([0-9a-fA-F]+)$/
330 symbol = $1.to_i(16).to_s
332 symbol = $1.to_i.to_s
335 # output the symbol's irc-translated character, or a * if it's unknown
336 UNESCAPE_TABLE[symbol] || (symbol.match(/^\d+$/) ? [symbol.to_i].pack("U") : '*')
341 # Try to grab and IRCify the first HTML par (<p> tag) in the given string.
342 # If possible, grab the one after the first heading
344 # It is possible to pass some options to determine how the stripping
345 # occurs. Currently supported options are
346 # strip:: Regex or String to strip at the beginning of the obtained
348 # min_spaces:: minimum number of spaces a paragraph should have
350 def Utils.ircify_first_html_par(xml_org, opts={})
351 if defined? ::Hpricot
352 Utils.ircify_first_html_par_wh(xml_org, opts)
354 Utils.ircify_first_html_par_woh(xml_org, opts)
358 # HTML first par grabber using hpricot
359 def Utils.ircify_first_html_par_wh(xml_org, opts={})
360 doc = Hpricot(xml_org)
362 # Strip styles and scripts
363 (doc/"style|script").remove
368 strip = Regexp.new(/^#{Regexp.escape(strip)}/) if strip.kind_of?(String)
370 min_spaces = opts[:min_spaces] || 8
371 min_spaces = 0 if min_spaces < 0
375 pre_h = pars = by_span = nil
378 debug "Minimum number of spaces: #{min_spaces}"
380 # Initial attempt: <p> that follows <h\d>
382 pre_h = Hpricot::Elements[]
384 doc.search("*") { |e|
390 pre_h << e if found_h
393 debug "Hx: found: #{pre_h.pretty_inspect}"
398 txt = p.to_html.ircify_html
399 txt.sub!(strip, '') if strip
400 debug "(Hx attempt) #{txt.inspect} has #{txt.count(" ")} spaces"
401 break unless txt.empty? or txt.count(" ") < min_spaces
404 return txt unless txt.empty? or txt.count(" ") < min_spaces
406 # Second natural attempt: just get any <p>
407 pars = doc/"p" if pars.nil?
408 debug "par: found: #{pars.pretty_inspect}"
411 txt = p.to_html.ircify_html
412 txt.sub!(strip, '') if strip
413 debug "(par attempt) #{txt.inspect} has #{txt.count(" ")} spaces"
414 break unless txt.empty? or txt.count(" ") < min_spaces
417 return txt unless txt.empty? or txt.count(" ") < min_spaces
419 # Nothing yet ... let's get drastic: we look for non-par elements too,
420 # but only for those that match something that we know is likely to
423 # Some blogging and forum platforms use spans or divs with a 'body' or
424 # 'message' or 'text' in their class to mark actual text. Since we want
425 # the class match to be partial and case insensitive, we collect
426 # the common elements that may have this class and then filter out those
427 # we don't need. If no divs or spans are found, we'll accept additional
428 # elements too (td, tr, tbody, table).
430 by_span = Hpricot::Elements[]
431 extra = Hpricot::Elements[]
432 doc.search("*") { |el|
433 next if el.bogusetag?
436 by_span.push el if el[:class] =~ AFTER_PAR_CLASS or el[:id] =~ AFTER_PAR_CLASS
438 extra.push el if el[:class] =~ AFTER_PAR_CLASS or el[:id] =~ AFTER_PAR_CLASS
441 if by_span.empty? and not extra.empty?
444 debug "other \#1: found: #{by_span.pretty_inspect}"
449 txt = p.to_html.ircify_html
450 txt.sub!(strip, '') if strip
451 debug "(other attempt \#1) #{txt.inspect} has #{txt.count(" ")} spaces"
452 break unless txt.empty? or txt.count(" ") < min_spaces
455 return txt unless txt.empty? or txt.count(" ") < min_spaces
457 # At worst, we can try stuff which is comprised between two <br>
460 debug "Last candidate #{txt.inspect} has #{txt.count(" ")} spaces"
461 return txt unless txt.count(" ") < min_spaces
462 break if min_spaces == 0
467 # HTML first par grabber without hpricot
468 def Utils.ircify_first_html_par_woh(xml_org, opts={})
469 xml = xml_org.gsub(/<!--.*?-->/m,
470 "").gsub(/<script(?:\s+[^>]*)?>.*?<\/script>/im,
471 "").gsub(/<style(?:\s+[^>]*)?>.*?<\/style>/im,
472 "").gsub(/<select(?:\s+[^>]*)?>.*?<\/select>/im,
476 strip = Regexp.new(/^#{Regexp.escape(strip)}/) if strip.kind_of?(String)
478 min_spaces = opts[:min_spaces] || 8
479 min_spaces = 0 if min_spaces < 0
484 debug "Minimum number of spaces: #{min_spaces}"
485 header_found = xml.match(HX_REGEX)
488 while txt.empty? or txt.count(" ") < min_spaces
489 candidate = header_found[PAR_REGEX]
490 break unless candidate
491 txt = candidate.ircify_html
493 txt.sub!(strip, '') if strip
494 debug "(Hx attempt) #{txt.inspect} has #{txt.count(" ")} spaces"
498 return txt unless txt.empty? or txt.count(" ") < min_spaces
500 # If we haven't found a first par yet, try to get it from the whole
503 while txt.empty? or txt.count(" ") < min_spaces
504 candidate = header_found[PAR_REGEX]
505 break unless candidate
506 txt = candidate.ircify_html
508 txt.sub!(strip, '') if strip
509 debug "(par attempt) #{txt.inspect} has #{txt.count(" ")} spaces"
512 return txt unless txt.empty? or txt.count(" ") < min_spaces
514 # Nothing yet ... let's get drastic: we look for non-par elements too,
515 # but only for those that match something that we know is likely to
520 while txt.empty? or txt.count(" ") < min_spaces
521 candidate = header_found[AFTER_PAR1_REGEX]
522 break unless candidate
523 txt = candidate.ircify_html
525 txt.sub!(strip, '') if strip
526 debug "(other attempt \#1) #{txt.inspect} has #{txt.count(" ")} spaces"
529 return txt unless txt.empty? or txt.count(" ") < min_spaces
533 while txt.empty? or txt.count(" ") < min_spaces
534 candidate = header_found[AFTER_PAR2_REGEX]
535 break unless candidate
536 txt = candidate.ircify_html
538 txt.sub!(strip, '') if strip
539 debug "(other attempt \#2) #{txt.inspect} has #{txt.count(" ")} spaces"
542 debug "Last candidate #{txt.inspect} has #{txt.count(" ")} spaces"
543 return txt unless txt.count(" ") < min_spaces
544 break if min_spaces == 0
549 # This method extracts title, content (first par) and extra
550 # information from the given document _doc_.
552 # _doc_ can be an URI, a Net::HTTPResponse or a String.
554 # If _doc_ is a String, only title and content information
555 # are retrieved (if possible), using standard methods.
557 # If _doc_ is an URI or a Net::HTTPResponse, additional
558 # information is retrieved, and special title/summary
559 # extraction routines are used if possible.
561 def Utils.get_html_info(bot, doc, opts={})
564 Utils.get_string_html_info(doc, opts)
565 when Net::HTTPResponse
566 Utils.get_resp_html_info(bot, doc, opts)
569 bot.httputil.get_response(doc) { |resp|
570 ret.replace Utils.get_resp_html_info(bot, resp, opts)
578 class ::UrlLinkError < RuntimeError
581 # This method extracts title, content (first par) and extra
582 # information from the given Net::HTTPResponse _resp_.
584 # Currently, the only accepted options (in _opts_) are
585 # uri_fragment:: the URI fragment of the original request
586 # full_body:: get the whole body instead of
587 # bot.config['http.info_bytes'] bytes only
589 # Returns a DataStream with the following keys:
590 # text:: the (partial) body
591 # title:: the title of the document (if any)
592 # content:: the first paragraph of the document (if any)
594 # the headers of the Net::HTTPResponse. The value is
595 # a Hash whose keys are lowercase forms of the HTTP
596 # header fields, and whose values are Arrays.
598 def Utils.get_resp_html_info(bot, resp, opts={})
600 when Net::HTTPSuccess
601 loc = URI.parse(resp['x-rbot-location'] || resp['location']) rescue nil
602 if loc and loc.fragment and not loc.fragment.empty?
603 opts[:uri_fragment] ||= loc.fragment
605 ret = DataStream.new(opts.dup)
606 ret[:headers] = resp.to_hash
607 ret[:text] = partial = opts[:full_body] ? resp.body : resp.partial_body(bot.config['http.info_bytes'])
609 filtered = Utils.try_htmlinfo_filters(bot, ret)
613 elsif resp['content-type'] =~ /^text\/|(?:x|ht)ml/
614 ret.merge!(Utils.get_string_html_info(partial, opts))
618 raise UrlLinkError, "getting link (#{resp.code} - #{resp.message})"
622 # This method runs an appropriately-crafted DataStream _ds_ through the
623 # filters in the :htmlinfo filter group, in order. If one of the filters
624 # returns non-nil, its results are merged in _ds_ and returned. Otherwise
627 # The input DataStream should have the downloaded HTML as primary key
628 # (:text) and possibly a :headers key holding the response headers.
630 def Utils.try_htmlinfo_filters(bot, ds)
631 filters = bot.filter_names(:htmlinfo)
632 return nil if filters.empty?
634 # TODO filter priority
636 debug "testing htmlinfo filter #{n}"
637 cur = bot.filter(bot.global_filter_name(n, :htmlinfo), ds)
638 debug "returned #{cur.pretty_inspect}"
641 return ds.merge(cur) if cur
644 # HTML info filters often need to check if the webpage location
645 # of a passed DataStream _ds_ matches a given Regexp.
646 def Utils.check_location(ds, rx)
649 loc = [h['x-rbot-location'],h['location']].flatten.grep(rx)
653 return loc.empty? ? nil : loc
656 # This method extracts title and content (first par)
657 # from the given HTML or XML document _text_, using
658 # standard methods (String#ircify_html_title,
659 # Utils.ircify_first_html_par)
661 # Currently, the only accepted option (in _opts_) is
662 # uri_fragment:: the URI fragment of the original request
664 def Utils.get_string_html_info(text, opts={})
665 debug "getting string html info"
667 title = txt.ircify_html_title
669 if frag = opts[:uri_fragment] and not frag.empty?
670 fragreg = /<a\s+(?:[^>]+\s+)?(?:name|id)=["']?#{frag}["']?[^>]*>/im
673 if txt.match(fragreg)
674 # grab the post-match
680 c_opts[:strip] ||= title
681 content = Utils.ircify_first_html_par(txt, c_opts)
682 content = nil if content.empty?
683 return {:title => title, :content => content}
686 # Get the first pars of the first _count_ _urls_.
687 # The pages are downloaded using the bot httputil service.
688 # Returns an array of the first paragraphs fetched.
689 # If (optional) _opts_ :message is specified, those paragraphs are
690 # echoed as replies to the IRC message passed as _opts_ :message
692 def Utils.get_first_pars(bot, urls, count, opts={})
696 while count > 0 and urls.length > 0
701 info = Utils.get_html_info(bot, URI.parse(url), opts)
707 msg.reply "[#{idx}] #{par}", :overlong => :truncate if msg
711 debug "Unable to retrieve #{url}: #{$!}"
718 # Returns a comma separated list except for the last element
719 # which is joined in with specified conjunction
721 def Utils.comma_list(words, options={})
722 defaults = { :join_with => ", ", :join_last_with => _(" and ") }
723 opts = defaults.merge(options)
728 [words[0..-2].join(opts[:join_with]), words.last].join(opts[:join_last_with])