def digg(m, params)
max = params[:limit].to_i
- puts "max is #{max}"
+ debug "max is #{max}"
xml = @bot.httputil.get(URI.parse("http://digg.com/rss/index.xml"))
unless xml
m.reply "digg news parse failed"
def info(rawstr)
sr = search(rawstr)
if !sr
- puts "IMDB : search returned NIL"
+ debug "IMDB: search returned NIL"
return nil
end
resp, data = @http.get(sr, "User-Agent" =>
# import if old file format found
if(File.exist?("#{@bot.botclass}/karma.rbot"))
- puts "importing old karma data"
+ log "importing old karma data"
IO.foreach("#{@bot.botclass}/karma.rbot") do |line|
if(line =~ /^(\S+)<=>([\d-]+)$/)
item = $1
#{{{
def save
Dir.mkdir("#{@bot.botclass}/lart") if not FileTest.directory? "#{@bot.botclass}/lart"
+ # TODO implement safe saving here too
File.open("#{@bot.botclass}/lart/larts", "w") { |file|
file.puts @larts
}
end
m.reply answer
rescue Exception => e
- puts "couldn't evaluate expression \"#{m.params}\": #{e}"
+ error "couldn't evaluate expression \"#{m.params}\": #{e.inspect}"
m.reply "illegal expression \"#{m.params}\""
return
end
File.rename("#{@bot.botclass}/quotes/new/#{channel}",
"#{@bot.botclass}/quotes/#{channel}")
rescue => e
- $stderr.puts "failed to write quotefile for channel #{channel}!\n#{$!}"
- debug "#{e.class}: #{e}"
- debug e.backtrace.join("\n")
+ error "failed to write quotefile for channel #{channel}!\n#{e}"
+ error "#{e.class}: #{e}"
+ error e.backtrace.join("\n")
end
}
end
def remind(m, params)
who = params.has_key?(:who) ? params[:who] : m.sourcenick
string = params[:string].to_s
- puts "in remind, string is: #{string}"
+ debug "in remind, string is: #{string}"
if(string =~ /^(.*)\s+in\s+(.*)$/)
subject = $1
period = $2
m.reply "search for #{search} failed"
return
end
- puts xml.inspect
+ debug xml.inspect
begin
doc = Document.new xml
rescue REXML::ParseException => e
- puts e
+ warning e.inspect
m.reply "couldn't parse output XML: #{e.class}"
return
end
m.reply "search for #{search} failed"
return
end
- puts doc.inspect
+ debug doc.inspect
max = 8 if max > 8
done = 0
doc.elements.each("*/item") {|e|
end
def slashdot(m, params)
- puts params.inspect
+ debug params.inspect
max = params[:limit].to_i
- puts "max is #{max}"
+ debug "max is #{max}"
xml = @bot.httputil.get(URI.parse("http://slashdot.org/slashdot.xml"))
unless xml
m.reply "slashdot news parse failed"
raise "Error: Maximum redirects hit."
end
- puts "+ Getting #{uri_str}"
+ debug "+ Getting #{uri_str}"
url = URI.parse(uri_str)
return if url.scheme !~ /https?/
title = nil
- puts "+ connecting to #{url.host}:#{url.port}"
+ debug "+ connecting to #{url.host}:#{url.port}"
http = @bot.httputil.get_proxy(url)
http.start { |http|
url.path = '/' if url.path == ''
when Net::HTTPRedirection, Net::HTTPMovedPermanently then
# call self recursively if this is a redirect
redirect_to = response['location'] || './'
- puts "+ redirect location: #{redirect_to.inspect}"
+ debug "+ redirect location: #{redirect_to.inspect}"
url = URI.join url.to_s, redirect_to
- puts "+ whee, redirecting to #{url.to_s}!"
+ debug "+ whee, redirecting to #{url.to_s}!"
return get_title_for_url(url.to_s, depth-1)
when Net::HTTPSuccess then
if response['content-type'] =~ /^text\//
# since the content is 'text/*' and is small enough to
# be a webpage, retrieve the title from the page
- puts "+ getting #{url.request_uri}"
+ debug "+ getting #{url.request_uri}"
data = read_data_from_response(response, 50000)
return get_title_from_html(data)
else
hostname = err.message
retry
rescue StandardError => err
- puts err
+ error err.inspect
m.reply "couldn't connect to #{uri.host}:#{uri.port} :("
return
end