Web Application Scanning with XssPy

A telling fact: Microsoft, Stanford, Motorola, Informatica, and many other large organizations use this Python-based XSS (cross-site scripting) vulnerability scanner. Written by the talented Faizan Ahmad, XssPy is a smart tool that checks not just the home page or a single given page, but every link and subdomain of a site. XssPy's scans are therefore both thorough and broad in scope.

Usage

The introduction sounded good enough that I went and fetched the code to see for myself:
git://www.github.com/faizann24/XssPy.git

Clone the code locally with Git:
git clone http://www.github.com/faizann24/XssPy

Kali already ships with it.
The tool runs on Python 2.7 and depends on mechanize. If mechanize is not installed, type "pip install mechanize" in a terminal.
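A quick sanity check that the dependency is importable (assuming python points at Python 2.7; mechanize exposes a __version__ attribute):

python -c "import mechanize; print mechanize.__version__"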

Usage:
python XssPy.py -u website.com -e
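The banner in the source below documents a few more invocations, for example verbose logging and sending cookies:

python XssPy.py -u website.com -v
python XssPy.py -u website.com -c name=val name=val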

Python code

#!/usr/bin/env python
import mechanize
import sys
import httplib
import argparse
import logging
from urlparse import urlparse

br = mechanize.Browser()  # initiating the browser
br.addheaders = [
    ('User-agent',
     'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11')
]
br.set_handle_robots(False)
br.set_handle_refresh(False)

payloads = ['<svg "ons>', '" onfocus="alert(1);', 'javascript:alert(1)']
blacklist = ['.png', '.jpg', '.jpeg', '.mp3', '.mp4', '.avi', '.gif', '.svg',
             '.pdf']
xssLinks = []  # total cross site scripting findings


class color:
    BLUE = '\033[94m'
    RED = '\033[91m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    BOLD = '\033[1m'
    END = '\033[0m'

    @staticmethod
    def log(lvl, col, msg):
        logger.log(lvl, col + msg + color.END)


print color.BOLD + color.RED + """
XssPy - Finding XSS made easier
Author: Faizan Ahmad (Fsecurify)
Email: fsecurify@gmail.com
Usage: XssPy.py website.com (Not www.website.com OR http://www.website.com)
Comprehensive Scan: python XssPy.py -u website.com -e
Verbose logging: python XssPy.py -u website.com -v
Cookies: python XssPy.py -u website.com -c name=val name=val

Description: XssPy is a python tool for finding Cross Site Scripting
vulnerabilities in websites. This tool is the first of its kind.
Instead of just checking one page as most of the tools do, this tool
traverses the website and finds all the links and subdomains first.
After that, it starts scanning each and every input on each and every
page that it found during its traversal. It uses small yet effective
payloads to search for XSS vulnerabilities. XSS in many high
profile websites and educational institutes has been found
by using this very tool.
""" + color.END

logger = logging.getLogger(__name__)
lh = logging.StreamHandler()  # handler for the logger
logger.addHandler(lh)
formatter = logging.Formatter('[%(asctime)s] %(message)s', datefmt='%H:%M:%S')
lh.setFormatter(formatter)

parser = argparse.ArgumentParser()
parser.add_argument('-u', action='store', dest='url',
                    help='The URL to analyze')
parser.add_argument('-e', action='store_true', dest='compOn',
                    help='Enable comprehensive scan')
parser.add_argument('-v', action='store_true', dest='verbose',
                    help='Enable verbose logging')
parser.add_argument('-c', action='store', dest='cookies',
                    help='Space separated list of cookies',
                    nargs='+', default=[])
results = parser.parse_args()

logger.setLevel(logging.DEBUG if results.verbose else logging.INFO)


def testPayload(payload, p, link):
    br.form[str(p.name)] = payload
    br.submit()
    # if the payload is echoed back in the response, we have XSS
    if payload in br.response().read():
        color.log(logging.DEBUG, color.BOLD + color.GREEN, 'XSS found!')
        report = 'Link: %s, Payload: %s, Element: %s' % (str(link),
                                                         payload, str(p.name))
        color.log(logging.INFO, color.BOLD + color.GREEN, report)
        xssLinks.append(report)
        br.back()


def initializeAndFind():
    if not results.url:  # check whether a url has been passed
        color.log(logging.INFO, color.GREEN, 'Url not provided correctly')
        return []

    firstDomains = []  # list of domains
    allURLS = []
    allURLS.append(results.url)  # just one url at the moment
    largeNumberOfUrls = []  # in case one wants to do a comprehensive search

    # do a short traversal if no extra command line argument is passed
    color.log(logging.INFO, color.GREEN, 'Doing a short traversal.')
    for url in allURLS:
        smallurl = str(url)
        # Test HTTPS/HTTP compatibility. Prefers HTTPS but defaults to
        # HTTP if any errors are encountered
        try:
            test = httplib.HTTPSConnection(smallurl)
            test.request("GET", "/")
            response = test.getresponse()
            if (response.status == 200) | (response.status == 302):
                url = "https://www." + str(url)
            elif response.status == 301:
                loc = urlparse(response.getheader('Location'))
                url = loc.scheme + '://' + loc.netloc
            else:
                url = "http://www." + str(url)
        except:
            url = "http://www." + str(url)
        try:
            br.open(url)
            for cookie in results.cookies:
                color.log(logging.INFO, color.BLUE,
                          'Adding cookie: %s' % cookie)
                br.set_cookie(cookie)
            br.open(url)
            color.log(logging.INFO, color.GREEN,
                      'Finding all the links of the website ' + str(url))
            for link in br.links():  # find the links of the website
                if smallurl in str(link.absolute_url):
                    firstDomains.append(str(link.absolute_url))
            firstDomains = list(set(firstDomains))
        except:
            pass
    color.log(logging.INFO, color.GREEN,
              'Number of links to test are: ' + str(len(firstDomains)))
    if results.compOn:
        color.log(logging.INFO, color.GREEN,
                  'Doing a comprehensive traversal. This may take a while')
        for link in firstDomains:
            try:
                br.open(link)
                # go deeper into each link and collect its links too
                for newlink in br.links():
                    if smallurl in str(newlink.absolute_url):
                        largeNumberOfUrls.append(newlink.absolute_url)
            except:
                pass
        firstDomains = list(set(firstDomains + largeNumberOfUrls))
        color.log(logging.INFO, color.GREEN,
                  'Total Number of links to test have become: ' +
                  str(len(firstDomains)))  # all links have been found
    return firstDomains


def findxss(firstDomains):
    # start looking for XSS
    color.log(logging.INFO, color.GREEN, 'Started finding XSS')
    if firstDomains:  # if there is at least one link
        for link in firstDomains:
            blacklisted = False
            y = str(link)
            color.log(logging.DEBUG, color.YELLOW, str(link))
            for ext in blacklist:  # skip binary/media files
                if ext in y:
                    color.log(logging.DEBUG, color.RED,
                              '\tNot a good url to test')
                    blacklisted = True
                    break
            if not blacklisted:
                try:
                    br.open(str(link))  # open the link
                    if br.forms():  # if a form exists, submit it
                        params = list(br.forms())[0]  # our form
                        br.select_form(nr=0)  # select the first form
                        for p in params.controls:
                            par = str(p)
                            # test only those controls which accept text
                            if 'TextControl' in par:
                                color.log(logging.DEBUG, color.YELLOW,
                                          '\tParam: ' + str(p.name))
                                for item in payloads:
                                    testPayload(item, p, link)
                except:
                    pass
        color.log(logging.DEBUG, color.GREEN + color.BOLD,
                  'The following links are vulnerable: ')
        for link in xssLinks:  # print all xss findings
            color.log(logging.DEBUG, color.GREEN, '\t' + link)
    else:
        color.log(logging.INFO, color.RED + color.BOLD,
                  '\tNo link found, exiting')


# kick off the scan
firstDomains = initializeAndFind()
findxss(firstDomains)
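Each hit is reported in the format used by testPayload above, 'Link: <url>, Payload: <payload>, Element: <input name>', and appended to xssLinks so that all findings can be printed together at the end of the scan.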

How it works

Having read the code, the tool's working principle is clear:

1. Take the supplied URL and normalize it to http[s]://www. + URL, trying HTTPS first and falling back to HTTP on any error.

2. Crawl the resulting pages and look for forms. For every text input in the first form of a page, submit each of the three built-in payloads: ['<svg "ons>', '" onfocus="alert(1);', 'javascript:alert(1)']. A payload echoed back verbatim in the response marks the link as vulnerable; a standalone sketch of this check follows.
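To see the detection step in isolation, here is a minimal sketch of the same reflection check, using mechanize just as the script does. The target URL is a placeholder, and the snippet assumes the page's first form has at least one named text input:

import mechanize

payload = '" onfocus="alert(1);'  # one of XssPy's built-in payloads

br = mechanize.Browser()
br.set_handle_robots(False)  # ignore robots.txt, as XssPy does
br.open('http://target.example/search')  # hypothetical page with a form
br.select_form(nr=0)  # pick the first form, as XssPy does
for control in br.form.controls:
    # fill every named text input with the payload
    if control.name and 'TextControl' in str(control):
        br.form[control.name] = payload
response = br.submit().read()
# a verbatim echo of the payload indicates reflected XSS
print 'Reflected XSS!' if payload in response else 'Payload not reflected'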
