# TODO allow selection of only quotes with vote > 0
require 'rexml/document'
-require 'uri/common'
class ::BashQuote
attr_accessor :num, :text, :vote
end
def search(m, params)
- esc = URI.escape(params[:words].to_s)
+ esc = CGI.escape(params[:words].to_s)
html = @bot.httputil.get("http://bash.org/?search=#{esc}")
html_bash(m, :html => html)
end
justcheck = params[:justcheck]\r
\r
word = params[:word].downcase\r
- url = @dmwapurl % URI.escape(word)\r
+ url = @dmwapurl % CGI.escape(word)\r
xml = nil\r
info = @bot.httputil.get_response(url) rescue nil\r
xml = info.body if info\r
\r
word = params[:word].join\r
[word, word + "_1"].each { |check|\r
- url = @oxurl % URI.escape(check)\r
+ url = @oxurl % CGI.escape(check)\r
h = @bot.httputil.head(url, :max_redir => 5)\r
if h\r
m.reply("#{word} found: #{url}") unless justcheck\r
justcheck = params[:justcheck]\r
\r
word = params[:word].to_s.downcase\r
- url = @chambersurl % URI.escape(word)\r
+ url = @chambersurl % CGI.escape(word)\r
xml = nil\r
info = @bot.httputil.get_response(url) rescue nil\r
xml = info.body if info\r
return
end
- data_text = URI.escape trans_text
+ data_text = CGI.escape trans_text
trans_pair = "#{trans_from}_#{trans_to}"
if (trans_text =~ /^http:\/\//) && (URI.parse(trans_text) rescue nil)
require 'rexml/document'
-require 'uri/common'
class FreshmeatPlugin < Plugin
include REXML
max = params[:limit].to_i
search = params[:search].to_s
max = 8 if max > 8
- begin
- xml = @bot.httputil.get("http://freshmeat.net/search-xml/?orderby=locate_projectname_full_DESC&q=#{URI.escape(search)}")
- rescue URI::InvalidURIError, URI::BadURIError => e
- m.reply "illegal search string #{search}"
- return
- end
+ xml = @bot.httputil.get("http://freshmeat.net/search-xml/?orderby=locate_projectname_full_DESC&q=#{CGI.escape(search)}")
unless xml
m.reply "search for #{search} failed"
return
wc = @wordcache[:english]\r
return true if wc.key?(word.to_sym)\r
rules = @rules[:english]\r
- p = @bot.httputil.get(rules[:url] % URI.escape(word))\r
+ p = @bot.httputil.get(rules[:url] % CGI.escape(word))\r
if not p\r
error "could not connect!"\r
return false\r
ll = ('a'..'z').to_a[rand(26)]\r
random = [l,ll].join('*') + '*'\r
debug "getting random word from dictionary, matching #{random}"\r
- p = @bot.httputil.get(rules[:url] % URI.escape(random))\r
+ p = @bot.httputil.get(rules[:url] % CGI.escape(random))\r
debug p\r
lemmi = Array.new\r
good = rules[:good]\r
end
def search(rawstr, rawopts={})
- str = URI.escape(rawstr)
+ str = CGI.escape(rawstr)
str << ";site=aka" if @bot.config['imdb.aka']
opts = rawopts.dup
opts[:type] = :both unless opts[:type]
spec = location ? "in #{location}" : "by #{artist}"
begin
if location
- esc = URI.escape(location)
+ esc = CGI.escape(location)
page = @bot.httputil.get "#{LASTFM}/events/?findloc=#{esc}"
else
- esc = URI.escape(artist)
+ esc = CGI.escape(artist)
page = @bot.httputil.get "#{LASTFM}/events?s=#{esc}&findloc="
end
artist = params[:who].to_s
page = nil
begin
- esc = URI.escape(artist)
+ esc = CGI.escape(artist)
page = @bot.httputil.get "#{LASTFM}/music/#{esc}"
if page
if page.match(/<h1 class="h1artist"><a href="([^"]+)">(.*?)<\/a><\/h1>/)
def google(m, params)
what = params[:words].to_s
- searchfor = URI.escape what
+ searchfor = CGI.escape what
# This method is also called by other methods to restrict searching to some sites
if params[:site]
site = "site:#{params[:site]}+"
def gcalc(m, params)
what = params[:words].to_s
- searchfor = URI.escape(what).sub('+','%2B')
+ searchfor = CGI.escape(what)
debug "Getting gcalc thing: #{searchfor.inspect}"
url = "http://www.google.com/search?q=#{searchfor}"
require 'rexml/document'
-require 'uri/common'
class SlashdotPlugin < Plugin
include REXML
end
def search_slashdot(m, params)
- max = params[:limit].to_i
- search = params[:search].to_s
+ max = params[:limit].to_i
+ search = params[:search].to_s
- begin
- xml = @bot.httputil.get("http://slashdot.org/search.pl?content_type=rss&query=#{URI.escape(search)}")
- rescue URI::InvalidURIError, URI::BadURIError => e
- m.reply "illegal search string #{search}"
- return
- end
+ xml = @bot.httputil.get("http://slashdot.org/search.pl?content_type=rss&query=#{CGI.escape(search)}")
unless xml
m.reply "search for #{search} failed"
return
doc.elements.each("*/item") {|e|
desc = e.elements["title"].text
desc.gsub!(/(.{150}).*/, '\1..')
- reply = sprintf("%s | %s", e.elements["link"].text, desc)
+ reply = sprintf("%s | %s", e.elements["link"].text, desc.ircify_html)
m.reply reply
done += 1
break if done >= max
end
end
# we give a very high 'skip' because this will allow us to get the number of definitions by retrieving the previous definition
- uri = "http://www.urbanwap.com/search.php?term=#{URI.escape words}&skip=65536"
+ uri = "http://www.urbanwap.com/search.php?term=#{CGI.escape words}&skip=65536"
page = @bot.httputil.get(uri)
if page.nil?
m.reply "Couldn't retrieve an urban dictionary definition of #{words}"
n = numdefs
end
if n < numdefs
- uri = "http://www.urbanwap.com/search.php?term=#{URI.escape words}&skip=#{n-1}"
+ uri = "http://www.urbanwap.com/search.php?term=#{CGI.escape words}&skip=#{n-1}"
page = @bot.httputil.get(uri)
if page.nil?
case n % 10
def wu_station(m, where, units)
begin
- xml = @bot.httputil.get(@wu_station_url % [units, URI.escape(where)])
+ xml = @bot.httputil.get(@wu_station_url % [units, CGI.escape(where)])
case xml
when nil
m.reply "couldn't retrieve weather information, sorry"
def wu_weather(m, where, units)
begin
- xml = @bot.httputil.get(@wu_url % [units, URI.escape(where)])
+ xml = @bot.httputil.get(@wu_url % [units, CGI.escape(where)])
case xml
when nil
m.reply "couldn't retrieve weather information, sorry"