A Very Simple Web Page View-Boosting Script (Python)
疯茨 2024-08-14 12:05:03
1. What the Script Does
It increases a web page's view counter by simulating visits, commonly called "view boosting".
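Before the full Selenium version below, here is a minimal sketch of the core idea using plain requests: send repeated GET requests through a proxy while rotating the User-Agent header. The URL, proxy address, and UA strings here are placeholders, not values from the real script.

import random
import time
import requests

url = 'https://example.com/news/123'  # placeholder target page
proxies = {'http': '1.2.3.4:8080', 'https': '1.2.3.4:8080'}  # placeholder proxy
user_agents = [  # placeholder UA pool
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
]

for i in range(10):
    headers = {'User-Agent': random.choice(user_agents)}
    try:
        res = requests.get(url, headers=headers, proxies=proxies,
                           verify=False, timeout=5)
        print('visit %d -> HTTP %d' % (i + 1, res.status_code))
    except requests.RequestException as e:
        print('visit %d failed: %s' % (i + 1, e))
    time.sleep(random.uniform(2, 3))  # random pause, like speed_low/speed_high below

Many sites count views in ways a bare GET will not trigger, which is presumably why the real script drives a headless Chrome instead.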
2. Script File Structure
ipporxy.txt    # stores the proxy IPs
main.py        # main program
proxiesIp.py   # proxy IP validation
user_set.py    # view-boosting settings
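Typical workflow: run user_set.py first (its __main__ block calls getfreeIps() to scrape, validate, and save proxies into ipporxy.txt), then run main.py to start the simulated visits.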
3. The Code
ipporxy.txt
116.253.208.239:33080
123.56.175.31:3128
139.9.64.238:443
main.py
#!/usr/bin/env python
'''
1. Manually enter a news/article URL, then simulate human visits to it
2. Configurable: number of pages, visit count, visit speed, browser user agent
3. Volume requirement: [700 ~ 100000]
'''
import requests
import warnings
import urllib3
import time, random, datetime
import _thread
import user_set as user
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from urllib3.exceptions import InsecureRequestWarning

warnings.filterwarnings('ignore')
urllib3.disable_warnings(InsecureRequestWarning)  # silence the verify=False warnings
available_ip = ''  # working proxy as an "ip:port" string, set by ip_random()
test_url = 'https://www.baidu.com'
# Read the proxy IP list from file
def ip_file():
    available_ips = []
    try:
        with open('./ipporxy.txt', 'r', encoding='UTF-8') as f:
            for l in f.readlines():
                available_ips.append(l.rstrip('\n'))
        return available_ips
    except Exception:
        print("Error: ipporxy.txt not found")
        exit()
# Pick a random proxy from the file and probe it; store a working one globally
def ip_random():
    global available_ip
    ips = ip_file()
    if ips:
        proxie = random.choice(ips)
        s = requests.Session()
        s.headers['Connection'] = 'close'  # don't keep connections alive between probes
        proxies = {"http": proxie, "https": proxie}
        try:
            res = s.get(test_url, proxies=proxies, verify=False, timeout=2)
        except Exception:
            print('Proxy connection failed, switching IP...')
            ip_random()
        else:
            if res.status_code == 200:
                print('Proxy [%s] is reachable, ready to use....' % proxie)
                available_ip = proxie  # plain "ip:port" string, passed to --proxy-server later
    else:
        print('Error: no usable proxy IPs')
# Main worker: visit `url` `count` times (run inside a thread)
def pv_pool(name, url, count):
    for i in range(count):
        headers = random.choice(user.UserAgent1)['User-Agent']  # pick the UA string, not the dict
        chrome_options = Options()
        chrome_options.set_capability('acceptInsecureCerts', True)  # accept self-signed certs
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--user-agent=%s' % headers)
        try:
            chrome_options.add_argument('--proxy-server=%s' % available_ip)
            driver = webdriver.Chrome(options=chrome_options)
            driver.get(url)  # visit the page
        except Exception:
            print('Error: visit failed, switching IP')
            ip_random()
            continue
        else:
            time.sleep(random.uniform(user.speed_low, user.speed_high))  # simulate reading time
            driver.quit()  # quit() also ends the browser process, unlike close()
            print("Visit %d done at [%s]" % (i + 1, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    print('~~~~ Thread {} finished at: {} ~~~~~\n'.format(name, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
if __name__ == "__main__":
    # Pick a working proxy IP first
    ip_random()
    # Start boosting page views
    for site in user.webSites:
        url = site['url']
        times = int(site['times']) // 3  # split the total evenly across 3 threads
        print('Visits per thread: %d' % times)
        try:
            # Launch the worker threads
            print('※※※※※ Threads started at: {} ※※※※\n'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
            _thread.start_new_thread(pv_pool, (1, url, times))
            _thread.start_new_thread(pv_pool, (2, url, times))
            _thread.start_new_thread(pv_pool, (3, url, times))
        except Exception:
            print("Error: could not start threads")
    while True:
        time.sleep(1)  # keep the main thread alive; _thread workers die with it
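The _thread module used above is low-level, and the closing while True loop exists only to keep the main process alive, since _thread workers die as soon as the main thread exits. As an alternative (not what the original uses), here is a sketch of the same three-way fan-out with the higher-level threading module, which can join() its workers and exit cleanly:

import threading

def run_site(url, times):
    # Launch three workers for one site and wait for all of them
    workers = [threading.Thread(target=pv_pool, args=(n, url, times))
               for n in (1, 2, 3)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()  # blocks until the worker finishes; no busy-wait needed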
proxiesIp.py
import telnetlib  # deprecated; removed in Python 3.13 (see the note after this file)
import requests

test_url = 'https://www.baidu.com'

# Probe one proxy; append working ones to the IP file
def ip_is_alive(ip_port):
    ip, port = ip_port[0], ip_port[1]
    try:
        tn = telnetlib.Telnet(ip, port=port, timeout=1)
    except Exception:
        print('[-] dead proxy: {}:{}'.format(ip, port))
    else:
        tn.close()
        proxies = ip + ':' + port
        try:
            res = requests.get(test_url, proxies={"http": proxies, "https": proxies}, timeout=1)
        except Exception:
            print('[-] dead proxy: {}:{}'.format(ip, port))
        else:
            if res.status_code == 200:
                print('[+] working proxy: {}:{}'.format(ip, port))
                # Save the working proxy to file
                with open('ipporxy.txt', 'a+') as f:
                    f.write(ip + ':' + port + '\n')
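Note that telnetlib is deprecated (PEP 594) and was removed in Python 3.13, so the reachability probe above breaks on newer interpreters. A minimal substitute sketch using the standard socket module:

import socket

def port_is_open(ip, port, timeout=1):
    # True if a TCP connection to ip:port succeeds within `timeout` seconds
    try:
        with socket.create_connection((ip, int(port)), timeout=timeout):
            return True
    except OSError:
        return False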
user_set.py
#!/usr/bin/env python
import proxiesIp
import requests
from lxml import etree

# Target pages to boost and the visit count for each; default count is num
num = 30
webSites = [{'url': 'https://www.baidu.com', 'times': num}]  # use a full URL, since driver.get() requires a scheme
# Multiple pages:
# webSites = [{'url':'xxx','times':num},{'url':'xxx','times':num}]

# Random dwell-time range between visits, in seconds
speed_low = 2
speed_high = 3
# WeChat in-app browser user agents: Android, iOS
UserAgent1 = [
    {'User-Agent':'mozilla/5.0 (linux; u; android 4.1.2; zh-cn; mi-one plus build/jzo54k) applewebkit/534.30 (khtml, like gecko) version/4.0 mobile safari/534.30 MicroMessenger/5.0.1.352'},
    {'User-Agent':'mozilla/5.0 (iphone; cpu iphone os 5_1_1 like mac os x) applewebkit/534.46 (khtml, like gecko) mobile/9b206 MicroMessenger/5.0'},
    {'User-Agent':'Mozilla/5.0 (Linux; Android 5.0; SM-N9100 Build/LRX21V) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/37.0.0.0 Mobile Safari/537.36 MicroMessenger/6.0.2.56_r958800.520 NetType/WIFI'},
    {'User-Agent':'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Mobile/11D257 MicroMessenger/6.0.1 NetType/WIFI'},
    {'User-Agent':'Mozilla/5.0 (Linux; U; Android 2.3.6; zh-cn; GT-S5660 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1 MicroMessenger/4.5.255'},
    {'User-Agent':'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Mobile/11D257 MicroMessenger/6.1 NetType/WIFI'}]
# Desktop browser user agents: Chrome, Safari, Firefox, etc.
UserAgent2 = [
    {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'},
    {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'},
    {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'},
    {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'},
    {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'},
    {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362'},
    {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36'}]
# The proxies are scraped from a free listing site below; an IP API could be wired in later
def turn_symbol(c1):
    # Strip newlines and tabs from scraped table cells
    new_c1 = c1.replace('\n', '').replace('\t', '')
    return new_c1
# Scrape free proxies (ip and port columns) from a public proxy list site
def getfreeIps():
    # Clear previously stored proxies
    open('ipporxy.txt', 'w').close()
    for i in range(1, 100):
        url = 'http://www.89ip.cn/index_{}.html'.format(i)
        res = requests.get(url, timeout=5)
        res.encoding = 'utf-8'
        html = etree.HTML(res.text)
        ipdress = html.xpath('//table[@class="layui-table"]/tbody/tr/td[1]/text()')
        port = html.xpath('//table[@class="layui-table"]/tbody/tr/td[2]/text()')
        ipdress = list(map(turn_symbol, ipdress))
        port = list(map(turn_symbol, port))
        data = list(zip(ipdress, port))
        for j in range(len(data)):
            # Probe each candidate and store the working ones
            proxiesIp.ip_is_alive([data[j][0], data[j][1]])
if __name__ == "__main__":
    # Probe the scraped proxy IPs and store the working ones locally
    getfreeIps()
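The comment above turn_symbol mentions switching to a proxy API later. A rough sketch of what that could look like, assuming a hypothetical endpoint that returns one ip:port pair per line; the URL and response format are invented placeholders, not a real service:

import requests
import proxiesIp

def get_api_ips(api_url='http://proxy-api.example.com/fetch?count=50'):
    # Hypothetical API returning one "ip:port" per line; adapt the parsing
    # to whatever format a real provider actually uses
    res = requests.get(api_url, timeout=5)
    for line in res.text.splitlines():
        line = line.strip()
        if ':' in line:
            ip, port = line.split(':', 1)
            proxiesIp.ip_is_alive([ip, port])  # reuse the existing validator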