Python 学习

Python 学习

图片写入文字

OpenCV

仅支持英文

pip install opencv-python

1
2
3
4
5
6
7
8
9
10
11
12
import cv2

# --- Write English text onto an image with OpenCV ---
# cv2.putText renders ASCII glyphs only; use Pillow for Chinese text.
img = cv2.imread("C://Users//chenkaixin12121//Downloads//Snipaste_2023-07-28_16-43-20.png")

# Arguments: image, text, bottom-left corner, font face, scale, BGR color, thickness.
cv2.putText(img, "Hello World", (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 3)

# Persist the annotated image.
cv2.imwrite("C://Users//chenkaixin12121//Downloads//save.png", img)

# Uncomment to preview in a window:
# cv2.imshow("add_text", img)
# cv2.waitKey()
PIL

支持中英文

pip install pillow

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
import cv2
import numpy as np
from PIL import ImageFont, ImageDraw, Image

# --- Write text (including Chinese) onto an image with Pillow ---
bg = cv2.imread("C://Users//chenkaixin12121//Downloads//Snipaste_2023-07-28_16-43-20.png")

# 32pt SimSun covers both Latin and CJK glyphs.
font = ImageFont.truetype("font/simsun.ttc", 32)

# NOTE(review): cv2 returns BGR while PIL assumes RGB — harmless for black
# text, but colored fills would come out channel-swapped; confirm if colors matter.
canvas = Image.fromarray(bg)
pen = ImageDraw.Draw(canvas)
# Draw one Latin and one Chinese line.
pen.text((100, 100), "Hello World", font=font, fill=(0, 0, 0))
pen.text((100, 200), "你好", font=font, fill=(0, 0, 0))

save_img = np.array(canvas)
# Persist the annotated image.
cv2.imwrite("C://Users//chenkaixin12121//Downloads//save.png", save_img)
# cv2.imshow("add_text", save_img)
# cv2.waitKey()

生成随机优惠码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
import random
import string

# Pool of candidate characters: a-z, A-Z, 0-9.
random_sequence = string.ascii_letters + string.digits


def get_random(len1=4):
    """Return one random code segment of `len1` characters."""
    # random.sample picks without replacement, so characters never
    # repeat within a single segment.
    return "".join(random.sample(random_sequence, len1))


def get_code(num, len1=5, len2=4):
    """Generate `num` unique coupon codes.

    Each code is `len1` segments of `len2` characters joined by an
    em dash; the set guarantees uniqueness.
    """
    codes = set()
    # Keep drawing until we have enough distinct codes; duplicates are
    # absorbed by the set.
    while len(codes) < num:
        codes.add("—".join(get_random(len2) for _ in range(len1)))
    return codes


# Generate 100 coupon codes, show them, and persist one per line.
codes = get_code(100)
print(codes)

with open('C://Users//chenkaixin12121//Downloads//coupon.txt', 'w', encoding='utf-8') as file:
    file.writelines(f"{code}\n" for code in codes)

将优惠码保存到 MySQL 中

pip install sqlalchemy
pip install pymysql

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import sessionmaker, declarative_base

# Declarative base class all ORM models derive from.
Base = declarative_base()

# MySQL connection settings consumed by make_connect().
database_info = dict(
    user='root',
    passwd='root',
    ip='localhost',
    port='3306',
    database='test_db',
)


class Coupon(Base):
    """ORM model for the `coupon` table (one row per coupon code)."""

    __tablename__ = 'coupon'

    # Surrogate auto-increment primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Coupon code text; generated codes are 24 chars, 200 leaves headroom.
    code = Column(String(200))


def make_connect(db_info):
    """Open and return a SQLAlchemy session for the MySQL server in `db_info`."""
    url = 'mysql+pymysql://{user}:{passwd}@{ip}:{port}/{database}'.format_map(db_info)
    engine = create_engine(url)
    factory = sessionmaker(bind=engine)
    return factory()


def upload_to_database():
    """Load coupon codes from the export file and insert them into MySQL.

    Fix: strip the trailing newline from each line before storing it —
    previously every `code` column value ended with '\n', so exact-match
    lookups against the table would fail.
    """
    session = make_connect(database_info)
    try:
        with open('C://Users//chenkaixin12121//Downloads//coupon.txt', 'r', encoding="utf-8") as file:
            # Iterate lazily instead of readlines() so the whole file is
            # never held in memory.
            for line in file:
                code = line.strip()
                if code:  # skip blank lines
                    session.add(Coupon(code=code))
        session.commit()
    finally:
        # Close the session even if the insert or commit raises.
        session.close()


if __name__ == '__main__':
    # Run the import only when executed as a script.
    upload_to_database()

将优惠码保存到 Redis 中

pip install redis

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
import redis


def make_connect():
    """Connect to the local Redis server (default port 6379, database 0)."""
    return redis.Redis(host='localhost', port=6379, db=0)


def upload_to_database():
    """Load coupon codes from the export file into a Redis set.

    Fix: strip the trailing newline before sadd() — previously every
    member of "set_key" carried a '\n' suffix, so membership checks
    against a bare code would fail.
    """
    session = make_connect()
    with open('C://Users//chenkaixin12121//Downloads//coupon.txt', 'r', encoding='utf-8') as file:
        # Iterate lazily instead of readlines().
        for line in file:
            code = line.strip()
            if not code:  # skip blank lines
                continue
            print(code)
            session.sadd("set_key", code)


if __name__ == '__main__':
    # Entry point: push the coupon file into Redis.
    upload_to_database()

统计文件中单词的个数

1
2
3
4
5
6
7
8
9
10
11
12
import string

# Frequency table for every ASCII letter (a-z, A-Z), initialised to zero.
# NOTE(review): despite the section title, this counts individual letters,
# not words — confirm that is the intent.
alphabet = {letter: 0 for letter in string.ascii_letters}

with open('C://Users//chenkaixin12121//Downloads//coupon.txt', 'r', encoding='utf-8') as file:
    for line in file:
        # Iterate the line's characters directly; wrapping it in list()
        # only built a throwaway copy.
        for ch in line:
            if ch in alphabet:
                alphabet[ch] += 1

print(alphabet)

修改图片尺寸

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
import os

from PIL import Image


def process_image(filename, m_width=100, m_height=100):
    """Resize `filename` to (m_width, m_height) and save it beside the
    original with a 'new-' prefix.

    Fix: the source Image was never closed, leaking one file handle per
    call; both images are now released deterministically.
    """
    # Image.open keeps the file handle open until close(); the with-block
    # releases it even if resize() raises.
    with Image.open(filename) as image:
        new_img = image.resize((m_width, m_height), Image.LANCZOS)
    new_filename = os.path.dirname(filename) + '//new-' + os.path.basename(filename)
    print(new_filename)
    try:
        new_img.save(new_filename)
    finally:
        new_img.close()


# File extensions (lower-case) that will be resized.
ext = ['jpg', 'jpeg', 'png']
file_dir = 'C://Users//chenkaixin12121//Downloads'
files = os.listdir(file_dir)

# NOTE(review): re-running this also picks up the generated 'new-*'
# files — confirm that is intended.
for file in files:
    # Fix: compare case-insensitively so 'photo.JPG' is processed too.
    if file.rsplit('.', 1)[-1].lower() in ext:
        process_image(file_dir + '//' + file, 200, 200)

统计单词出现的个数

1
2
3
4
5
6
7
8
9
10
11
12
from collections import Counter

# Sample corpus for the frequency demo (contents kept verbatim).
words = [
    'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
    'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
    'eyes', "don't", 'look', 'around', 'the', 'eyes', 'look', 'into',
    'my', 'eyes', "you're", 'under',
]

# Counter maps each word to its number of occurrences.
word_counts = Counter(words)

# The three most frequent words as (word, count) pairs, most common first.
top_three = word_counts.most_common(3)
print(top_three)

统计文件中多少行代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
def statistics_line(filename):
    """Count code, comment, and blank lines in a Python source file.

    Returns a dict with keys 'code', 'annotation' (lines whose first
    non-space character is '#') and 'space' (blank lines).

    Fix: lines containing only whitespace (e.g. '   \n') were previously
    counted as code because only the exact string '\n' was treated as
    blank; they are now counted as 'space'.
    """
    count = {'code': 0, 'annotation': 0, 'space': 0}
    with open(filename, 'r', encoding='utf-8') as file:
        for line in file:
            stripped = line.strip()
            if not stripped:
                count['space'] += 1
            elif stripped.startswith('#'):
                count['annotation'] += 1
            else:
                count['code'] += 1
    return count


# Analyse a sample file and show the line-type breakdown.
result = statistics_line("C://Users//chenkaixin12121//Downloads//test.py")
print(result)

输出 html 文件的正文

pip install lxml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
from bs4 import BeautifulSoup as BS

# Sample document (kept verbatim; BeautifulSoup repairs the missing
# closing tags while parsing).
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
# Parse with lxml and print only the human-readable text.
print(BS(html_doc, 'lxml').get_text())

输出 html 文件的链接

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
from bs4 import BeautifulSoup as BS

# Sample document (kept verbatim).
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
# Print the href of every <a> element in document order.
for anchor in BS(html_doc, 'lxml').find_all('a'):
    print(anchor.get('href'))

生成验证码图片

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
import random

from PIL import Image, ImageDraw, ImageFont, ImageFilter

# Canvas: four 60x60 character cells side by side.
width = 60 * 4
height = 60
image = Image.new('RGB', (width, height), (255, 255, 255))
font = ImageFont.truetype('C://Windows//Fonts//Arial.ttf', 36)
draw = ImageDraw.Draw(image)

# Speckle the background pixel by pixel with light random colors.
for col in range(width):
    for row in range(height):
        draw.point((col, row), fill=(random.randint(64, 255), random.randint(64, 255), random.randint(64, 255)))

# Draw four random upper-case letters (A-Z) in darker random colors.
for slot in range(4):
    draw.text((60 * slot + 10, 10), chr(random.randint(65, 90)), font=font,
              fill=(random.randint(32, 127), random.randint(32, 127), random.randint(32, 127)))

# Blur the result to make automated recognition harder.
image = image.filter(ImageFilter.BLUR)
image.save('C://Users//chenkaixin12121//Downloads//code.jpg', 'jpeg')

检测用户输入敏感词后输出指定内容

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
# Words that trigger the "sensitive" branch (exact match only).
sensitive = [
    '北京', '程序员', '公务员', '领导', '牛比', '牛逼',
    '你娘', '你妈', 'love', 'sex', 'jiangge',
]

# Read input until the user types 'end'; reply depends on whether the
# whole input exactly matches a sensitive word.
while (word := input("请输入文字:")) != 'end':
    print('Freedom' if word in sensitive else 'Human Rights')

检测用户输入敏感词后替换

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
# Substrings to be masked with asterisks.
sensitive = [
    '北京', '程序员', '公务员', '领导', '牛比', '牛逼',
    '你娘', '你妈', 'love', 'sex', 'jiangge',
]

# Read input until 'end'; replace every sensitive substring with a
# same-length run of '*' and echo the result.
while (word := input("请输入文字:").strip()) != 'end':
    for item in sensitive:
        if item in word:
            word = word.replace(item, '*' * len(item))
    print(word)

爬取图片

pip install requests

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
import re

import requests

# Browser-like headers; the site rejects requests without a Referer.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
    "Referer": "https://pic.netbian.com/4kmeinv/index.html"
}

# Page 1 has no numeric suffix; pages 2-69 follow the index_{i}.html pattern.
url_list = ['https://pic.netbian.com/4kmeinv/index.html'] + \
           [f'https://pic.netbian.com/4kmeinv/index_{i}.html' for i in range(2, 70)]

img_dir = 'C://Users//chenkaixin12121//Downloads//图片//'

for url in url_list:
    page = requests.get(url, headers=headers)
    page.encoding = 'gbk'  # the site's pages are GBK encoded
    # Extract (src, alt) pairs for every thumbnail on the page.
    img_info = re.findall('img src="(.*?)" alt="(.*?)" /', page.text)

    for src, name in img_info:
        img_name = name + '.jpg'
        print(img_name)
        content = requests.get('https://pic.netbian.com' + src, headers=headers).content
        with open(img_dir + img_name, 'wb') as file:
            print(f"正在下载图片:{img_name}")
            file.write(content)

学生信息-文本写入到 excel

pip install xlwt

1
2
3
4
5
{
"1":["张三",150,120,100],
"2":["李四",90,99,95],
"3":["王五",60,66,68]
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
import json

import xlwt

# Load the {"id": [name, math, chinese, english]} mapping exported earlier.
with open('C://Users//chenkaixin12121//Downloads//测试json.txt', 'r', encoding='utf-8') as file:
    json_str = json.load(file)

# NOTE(review): xlwt writes the legacy .xls format even though the target
# filename ends in .xlsx — confirm downstream readers expect that.
workbook = xlwt.Workbook(encoding='utf-8')
work_sheet = workbook.add_sheet('student')
# Column 0 holds the id, the remaining columns the per-student values.
for row, key in enumerate(json_str):
    work_sheet.write(row, 0, key)
    for col, value in enumerate(json_str[key], start=1):
        work_sheet.write(row, col, value)
workbook.save('C://Users//chenkaixin12121//Downloads//测试.xlsx')

城市信息-文本写入到 excel

1
2
3
4
5
{
"1" : "上海",
"2" : "北京",
"3" : "成都"
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
import json

import xlwt

# Load the {"id": "city"} mapping from the text file.
with open('C://Users//chenkaixin12121//Downloads//测试json.txt', 'r', encoding='utf-8') as file:
    json_str = json.load(file)

workbook = xlwt.Workbook(encoding='utf-8')
work_sheet = workbook.add_sheet('city')
# One row per entry: id in column 0, city name in column 1.
for row, (key, city) in enumerate(json_str.items()):
    work_sheet.write(row, 0, key)
    work_sheet.write(row, 1, city)
workbook.save('C://Users//chenkaixin12121//Downloads//测试.xlsx')

数字信息-文本写入到 excel

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
import json

import xlwt

# Inline sample: a 3x3 matrix as a JSON array of arrays.
json_str = """
[
[1, 82, 65535],
[20, 90, 13],
[26, 809, 1024]
]
"""

json_list = json.loads(json_str)

# NOTE(review): xlwt writes the legacy .xls format despite the .xlsx name.
workbook = xlwt.Workbook(encoding='utf-8')
work_sheet = workbook.add_sheet('numbers')
# Idiom fix: enumerate() instead of index-based range(len()) loops.
for row, values in enumerate(json_list):
    for col, value in enumerate(values):
        work_sheet.write(row, col, value)
workbook.save('C://Users//chenkaixin12121//Downloads//测试.xlsx')

学生信息-xls 写入到 xml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree

import xlrd

# Read the first sheet of the workbook written by the xlwt examples.
workbook = xlrd.open_workbook('C://Users//chenkaixin12121//Downloads//测试.xlsx')
work_sheet = workbook.sheet_by_index(0)

# Rebuild {id: [name, math, chinese, english]} from the sheet rows.
data = {}
for i in range(work_sheet.nrows):
    cells = work_sheet.row(i)
    data[cells[0].value] = [cell.value for cell in cells[1:]]

# Serialise the dict as text inside a <students> element.
root = Element('root')
root.append(Comment('学生信息表 "id" : [名字, 数学, 语文, 英文]'))
students = SubElement(root, 'students')
students.text = str(data)
ElementTree(root).write('C://Users//chenkaixin12121//Downloads//测试.xml', encoding='utf-8')

城市信息-xls 写入到 xml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree

import xlrd

# Read the first sheet of the workbook written by the xlwt examples.
workbook = xlrd.open_workbook('C://Users//chenkaixin12121//Downloads//测试.xlsx')
work_sheet = workbook.sheet_by_index(0)

# {id: city} via a dict comprehension over the sheet rows.
data = {
    work_sheet.cell_value(i, 0): work_sheet.cell_value(i, 1)
    for i in range(work_sheet.nrows)
}

root = Element('root')
root.append(Comment('城市信息'))
citys = SubElement(root, 'citys')
citys.text = str(data)
ElementTree(root).write('C://Users//chenkaixin12121//Downloads//测试.xml', encoding='utf-8')

数字信息-xls 写入到 xml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree

import xlrd

# Read the first sheet of the workbook written by the xlwt examples.
workbook = xlrd.open_workbook('C://Users//chenkaixin12121//Downloads//测试.xlsx')
work_sheet = workbook.sheet_by_index(0)

# All cell values as a list of row lists.
data = [
    [cell.value for cell in work_sheet.row(i)]
    for i in range(work_sheet.nrows)
]

root = Element('root')
root.append(Comment('数字信息'))
numbers = SubElement(root, 'numbers')
numbers.text = str(data)
ElementTree(root).write('C://Users//chenkaixin12121//Downloads//测试.xml', encoding='utf-8')

xls 文件统计数据

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import re

import xlrd


def collect_times(xls_name):
    """Sum call durations from the first sheet of a phone-bill .xls file
    and print outgoing ("主叫"), incoming, and total talk time.
    """
    sheet = xlrd.open_workbook(xls_name).sheet_by_index(0)

    total = 0     # all calls, in seconds
    outgoing = 0  # "主叫" (caller) time, in seconds

    # Row 0 is the header, so start at row 1.
    for n in range(1, sheet.nrows):
        # Columns 3-4 hold the duration text (e.g. "9分23秒") and call type.
        call_time, call_type = sheet.row_values(n)[3:5]
        # "23秒" has no minute part, so the minute group may be empty.
        minute, second = re.match(r'(\d*?)[分]?(\d+)秒', call_time).groups()
        seconds = int(minute or '0') * 60 + int(second)

        if call_type == "主叫":
            outgoing += seconds
        total += seconds

    # divmod converts seconds back to (minutes, seconds) for display.
    print("本月主叫通话时间:%s分%s秒" % (divmod(outgoing, 60)))
    print("本月被叫通话时间:%s分%s秒" % (divmod(total - outgoing, 60)))
    print("本月通话时间总计:%s分%s秒" % (divmod(total, 60)))


if __name__ == "__main__":
    # Analyse the sample bill when run as a script.
    collect_times('2023年08月语音通信.xls')

密码加密

1
2
3
4
5
6
7
8
9
10
11
12
from hashlib import md5


def md5_encrypt(value, salt='sdfljoiwers23423'):
    """Return the hex MD5 digest of `value` concatenated with `salt`.

    NOTE(review): MD5 is cryptographically broken; for real password
    storage prefer hashlib.pbkdf2_hmac or scrypt with a per-user salt.
    """
    # Single expression replaces the stateful update() dance; the dead
    # commented-out line was removed.
    return md5((value + salt).encode('utf-8')).hexdigest()


# Demo: hash a sample password with the default salt.
password = '123456'
print(md5_encrypt(password))

使用 web 框架开发留言簿

pip install fastapi
pip install uvicorn

uvicorn main:app --reload

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
from datetime import datetime

from fastapi import FastAPI
from pydantic import BaseModel

# FastAPI application instance; run with `uvicorn main:app --reload`.
app = FastAPI()


class Message(BaseModel):
    """A guest-book entry posted by a visitor."""

    # Assigned by the server on creation; clients omit it.
    id: int | None = None
    name: str
    content: str
    # Server-side timestamp, formatted '%Y-%m-%d %H:%M:%S'.
    create_time: str | None = None


def response(code, data=None):
    """Build the uniform API envelope {'code': ..., 'data': ...}.

    `data` defaults to None so endpoints that have no payload can call
    response(code=200) without a TypeError.
    """
    return {'code': code, 'data': data}


# In-memory store of all messages (lost on restart).
message_list = []


@app.post("/leave_message")
async def leave_message(message: Message):
    """Store a new message and return its assigned id."""
    # Sequential id: the list is append-only, so len()+1 stays unique.
    message.id = len(message_list) + 1
    message.create_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    message_list.append(message)
    return response(200, message.id)


@app.get("/list")
async def get_message_list():
    """Return every stored message."""
    return response(200, message_list)

使用 web 框架开发 TodoList

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
from datetime import datetime

from fastapi import FastAPI
from pydantic import BaseModel

# FastAPI application instance for the TodoList service.
app = FastAPI()


class Task(BaseModel):
    """A single todo item."""

    # Assigned by the server on creation; clients omit it.
    id: int | None = None
    name: str
    # Server-side creation timestamp, '%Y-%m-%d %H:%M:%S'.
    create_time: str | None = None


def response(code, data=None):
    """Build the uniform API envelope {'code': ..., 'data': ...}.

    Fix: `data` now defaults to None — remove_task calls
    response(code=200) with no payload, which previously raised
    TypeError (missing required argument).
    """
    return {'code': code, 'data': data}


# In-memory task store keyed by task id (lost on restart).
task_dict = {}


@app.post("/create")
async def create_task(task: Task):
    """Create a task and return its new id.

    Fix: the id was computed as max(task_dict.values(), default=0) —
    comparing Task objects raises TypeError once the dict is non-empty,
    and the maximum was never incremented (the first task got id 0 and
    later ones would collide). Use the numeric keys, plus one.
    """
    task.id = max(task_dict, default=0) + 1
    task.create_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    task_dict[task.id] = task
    return response(200, task.id)


@app.delete("/{task_id}")
async def remove_task(task_id: int):
    """Delete a task by id (idempotent).

    Fixes: response() was called without its required `data` argument
    (TypeError on every delete), and `del` raised KeyError — an HTTP 500
    — for unknown ids; pop() with a default tolerates them.
    """
    task_dict.pop(task_id, None)
    return response(200, None)


@app.get("/list")
async def get_task_list():
    """Return every stored task.

    Idiom fix: list(dict.values()) replaces the copy-comprehension.
    """
    return response(200, list(task_dict.values()))