【AI-CAMP Cohort 3】Group 3 Homework

刘旭:

Week 1 Homework



Week 3 Homework
import os
import re
import requests
from urllib.parse import quote

KEYWORD = "充气拱门 图片"  # search keyword: "inflatable arch" images


def get_images(keyword, num):
    print(f"Start downloading images for {keyword}")
    keyword_encode = quote(keyword)
    # Baidu image search API endpoint
    url_pattern = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&' \
                  'queryWord={keyword_encode}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&' \
                  'copyright=&word={keyword_encode}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&' \
                  'expermode=&force=&cg=star&pn={page_num}&rn={info_num}&gsm=1e&1598868283333= '

    header = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_1) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/89.0.4389.90 Safari/537.36',
        'Referer': 'https://image.baidu.com'
    }
    page_num = 1
    info_num = 30
    image_pattern = re.compile(r'thumbURL.*?\.jpg')
    img_set = set()
    n = 1
    while True:
        resp = requests.get(url_pattern.format(keyword_encode=keyword_encode,
                                               page_num=page_num,
                                               info_num=info_num), headers=header)
        image_url_list = image_pattern.findall(resp.text)
        image_url_list = map(lambda x: x.replace('thumbURL":"', ''), image_url_list)
        for img_url in image_url_list:
            if n > num:
                print(f"Finished: downloaded {n-1} images in total.")
                return
            # skip URLs that were already downloaded
            if img_url in img_set:
                continue
            img_set.add(img_url)
            download_img(img_url, n)
            n += 1
        page_num += 1


def download_img(img_url, num):
    print(f"Downloading image {num}: {img_url}")
    img = requests.get(img_url)
    with open('data/image_{}.jpg'.format(num), 'wb') as f:
        f.write(img.content)


if __name__ == "__main__":
    os.makedirs('data', exist_ok=True)  # make sure the output directory exists
    get_images(KEYWORD, 1000)
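
The thumbnail URLs above are pulled out of the raw response text with a regex. An alternative, used by later submissions in this thread, is to parse the acjson payload as JSON; a minimal sketch, assuming the payload parses once strict=False tolerates its unescaped control characters:

def extract_thumb_urls(resp):
    # parse Baidu's acjson payload as JSON instead of regex-matching the text
    data = resp.json(strict=False).get('data', [])
    # entries can be empty dicts (typically the last one), hence the truthiness check
    return [d['thumbURL'] for d in data if d and 'thumbURL' in d]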

Week 3 Group Homework

A requirement has been created on the Yueying platform; data upload is at 100%, annotation is complete, and benchmarking (BMK) is in progress. Requirement id: 66, data volume: 2564.

Week 5 Homework

https://git-core.megvii-inc.com/ai_train/ai-liuxu04/-/tree/master/week4

Week 1 Homework - 包家睿
import cv2
import imgaug as ia
import imgaug.augmenters as iaa
import nori2 as nori
import numpy as np
import boto3
from meghair.utils import io
from meghair.utils.imgproc import imdecode

# Assignment: basic statistics, preprocessing, and visualization of the 1percent_ImageNet.txt data
# 1. Compute the average, max, and min image width and height
# 2. Apply rotation augmentation
# 3. Random-crop edge pixels
# 4. Visualize the resulting images

from matplotlib import pyplot as plt
%matplotlib inline

# Visualization helpers
class ImageShow():
    """
    按照指定布局可视化图片
    
    n_rows: int 布局行数
    n_cols: int 布局列数
    figsize: list  设置图片大小:[x, y] 宽度,高度以英寸为单位。
    """
    def __init__(self, n_rows, n_cols, figsize=[6.4, 4.8]):
        self.n_rows = n_rows
        self.n_cols = n_cols
        self.fig, self.axes = plt.subplots(n_rows, n_cols, sharex=True, squeeze=False, figsize= figsize)
        self.imgs = []
    
    def imgs_append(self, img):
        """
        Append an image to the image list.

        img: numpy.ndarray  an image as loaded by cv2.imread
        """
        b, g, r = cv2.split(img)  # split OpenCV's default B, G, R channels
        img = cv2.merge([r, g, b])  # reorder BGR to the more common RGB
        self.imgs.append(img)
        
    def show_plt(self):
        """
        Display the collected images.
        """
        n_rows = self.n_rows
        n_cols = self.n_cols
        for i in range(n_rows):
            for j in range(n_cols):
                index = i * n_cols + j
                axe = self.axes[i][j]
                axe.axis("off")
                if index < len(self.imgs):
                    axe.imshow(self.imgs[index])
# End of visualization helpers

fetcher = nori.Fetcher()

ia.seed(1)
NUM = 6

# Define the imgaug augmentation pipeline
seq = iaa.Sequential([
    iaa.Crop(px=(0, 16)),
    iaa.Affine(rotate=(-25, 25))
], random_order=True)

# Download "1percent_ImageNet.txt" from OSS
host = "http://oss.i.brainpp.cn"
s3_client = boto3.client('s3', endpoint_url=host)
resp = s3_client.download_file("ai-cultivate", "1percent_ImageNet.txt", "1percent_ImageNet.txt")

img_show = ImageShow(2, 1, [20, 8])
img_heights = []
img_widths = []
for i, line in enumerate(open("1percent_ImageNet.txt")):
    # fetch a single image
    img = imdecode(fetcher.get(line.split()[0]))
    
    # record image height and width
    sp = img.shape  # 0: height, 1: width, 2: channels
    img_heights.append(sp[0])
    img_widths.append(sp[1])
    
    images = np.array(
        [img] * NUM,
        dtype=np.uint8
    )
    H = sp[0]
    W = sp[1]
    write_img = np.zeros((H, (W+10)*NUM, 3), dtype=np.uint8)
    # apply augmentation
    images_aug = seq.augment_images(images=images)
    for j, img in enumerate(images_aug):
        # tile the augmented images into one canvas
        write_img[:, j*(W+10): j*(W+10)+W, :] = img
      
    img_show.imgs_append(write_img)
    # save the image (optional)
    # cv2.imwrite("/home/baojiarui/day1/img/img_%d.jpg" % i, write_img)
    
    if i > 8:
        break
print("img count %s" % len(img_heights))
print("max height %s" % max(img_heights))
print("min height %s" % min(img_heights))
print("average height %s" % (sum(img_heights) / len(img_heights)))

print("max widths %s" % max(img_widths))
print("min widths %s" % min(img_widths))
print("average widths %s" % (sum(img_widths) / len(img_widths)))

img_show.show_plt()

Statistics:
max height 5065
min height 21
average height 405.4505503083288
max width 4368
min width 46
average width 471.49918039185076
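
Given this spread (heights from 21 px to 5065 px), feeding these images to a model in batches needs a common canvas size. A minimal letterbox sketch; the function name and the 256×256 target size are arbitrary assumptions, not part of the assignment:

def letterbox(img, size=256):
    # aspect-preserving resize onto a square black canvas; size=256 is an arbitrary choice
    h, w = img.shape[:2]
    scale = size / max(h, w)
    resized = cv2.resize(img, (int(w * scale), int(h * scale)))
    nh, nw = resized.shape[:2]
    canvas = np.zeros((size, size, 3), dtype=np.uint8)
    canvas[(size - nh) // 2:(size - nh) // 2 + nh, (size - nw) // 2:(size - nw) // 2 + nw] = resized
    return canvas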

Week 3 Homework
import requests
import time
import json
import re
import random

def parse_cookies(cookie_str):
    cookies = {}
    for line in cookie_str.split(';'): 
        key, value = line.split('=', 1) 
        cookies[key] = value.encode("utf-8").decode("latin1")
    return cookies


def fetch_url(url, headers, cookies):
    try:
        resp = requests.get(url, headers=headers, cookies=cookies)
        resp.raise_for_status()
        return resp.content
    except Exception as e:
        print(e)

def get_resp_content(url, headers, cookies):
    content = fetch_url(url, headers, cookies)
    return content.decode('utf-8')
    
        
def parse_content_json(content_json, element_name):

    value_list = []
    data_list = json.loads(content_json)
    for data in data_list:
        element_value = data.get(element_name)
        if element_value:
            value_list.append(element_value)
    return value_list


def re_search_one(content, regex, output_group):
    searchObj = re.search(regex, content, re.M|re.I)
    content_group = output_group % searchObj.group(1)
    return content_group


def download_resource(url_list, file_name_regex, save_dir, file_suffix):
    for url in url_list:
        file_name = re_search_one(url, file_name_regex, "%s")   
        file_path = "%s/%s%s" % (save_dir, file_name, file_suffix)  
        resource = fetch_url(url, None, None)  
        with open(file_path, "wb") as w:
            w.write(resource)
            
        wait_second = random.randint(2, 8)
        time.sleep(wait_second) 
        print("wait second: %s ; save file: %s" % (wait_second, file_path))

cookie_str = r'$cookie'
headers = {'User-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'}
resp_content_regex = r'.*"data":(.*)'  
output_group = "%s]}]"
file_name_regex = r'.*u=(.*)&fm=.*'
element_name = "thumbURL" 
file_suffix = ".jpg"  
save_dir = "$save_dir" 

i = 1
num = 30
while i < 2000:
    url = "https://image.baidu.com/search/acjson?tn=resultjson_com&logid=6690820716424463597&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E5%85%85%E6%B0%94%E6%8B%B1%E9%97%A8&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=&word=%E5%85%85%E6%B0%94%E6%8B%B1%E9%97%A8&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn={}&rn={}&gsm=3c&1622605320685=".format(i, num)

    print(url)
    cookies = parse_cookies(cookie_str)
    content = get_resp_content(url, headers, cookies)
    re_result = re_search_one(content, resp_content_regex, output_group)
    value_list = parse_content_json(re_result, element_name)
    download_resource(value_list, file_name_regex, save_dir, file_suffix)
    
    i += num
    time.sleep(random.randint(10, 40))
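
As a quick sanity check of parse_cookies (the '$cookie' placeholder above is left to be filled in), here is what it returns for a made-up cookie header; note that keys keep the leading space after each ';', so stripping the key may be desirable:

sample_cookie = "BDqhfp=test; BAIDUID=0123"  # made-up values for illustration
print(parse_cookies(sample_cookie))
# {'BDqhfp': 'test', ' BAIDUID': '0123'}  <- note the leading space in the second key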
Week 5 Homework

https://git-core.megvii-inc.com/ai_train/ai-baojiarui/-/tree/master/week_05

熊磊:

Week 1 Homework

【AI Training Cohort 3 homework content thread】

from matplotlib import pyplot as plt
%matplotlib inline

def show_plt(img, W=6.4, H=4.8):
    """
    img: numpy.ndarray cv2.imread 打开的图片
    """
    plt.rcParams['figure.figsize'] = (W, H)
    img2 = img[:, :, ::-1]  # BGR -> RGB
    plt.imshow(img2)
    plt.axis('off')
    plt.show()


import cv2
import boto3
import nori2 as nori
import pandas as pd 
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
from meghair.utils.imgproc import imdecode

host = "http://oss.i.brainpp.cn"
bucket_name = "ai-cultivate"
object_key = "1percent_ImageNet.txt"


# Initialize the S3 client
s3_client = boto3.client('s3', endpoint_url=host)

# Fetch the object
resp = s3_client.get_object(Bucket=bucket_name, Key=object_key)
body_byte = resp['Body'].read()

# bytes -> str
body_str = str(body_byte, encoding="utf-8")

# str -> list
body_list = [item.split("\t") for item in body_str.split("\n")]

# read data
fetcher = nori.Fetcher()

pd_list = []
H, W = 450, 600
NUM = 6

# Image augmentation pipeline
seq = iaa.Sequential([
    iaa.Crop(px=(0, 16)),
    iaa.Resize({"height": H, "width": W}),
    iaa.Affine(
      rotate=(-45, 45)  
    )
])


'''
Basic data statistics
'''
body_len = len(body_list)
for item in body_list:
    try:
        img = imdecode(fetcher.get(item[0]))
        pd_list.append(img.shape)  
    except Exception as e:
        print("Error:%s, item:%s" % (e, item))    

# Summary statistics
df = pd.DataFrame(pd_list, columns=['height', 'width', 'channel'])
print("------------ Summary statistics ------------")
print("Total images: %s" % df.shape[0])
print("Mean width: %s; mean height: %s" % (df.width.mean(), df.height.mean()))
print("Max width: %s; max height: %s" % (df.width.max(), df.height.max()))
print("Min width: %s; min height: %s" % (df.width.min(), df.height.min()))
print()


"""
数据增强,可视化对比
"""
body_len = 6
for i in range(body_len):
    item = body_list[i]
    img = imdecode(fetcher.get(item[0]))
    images = np.array(
        [img] * NUM,
        dtype=np.uint8
    )
    write_img = np.zeros((H, (W+10)*(NUM+1), 3), dtype = np.uint8)
    write_img[:, 0: W, :] = cv2.resize(img, (W, H))  # include the original image for comparison
    images_aug = seq(images=images)  # apply augmentation
    
    # append the augmented images after the original
    for j, img_aug in enumerate(images_aug):
        write_img[:, (j+1)*(W+10): (j+1)*(W+10)+W, :] = img_aug

    # save the image
    file_path = item[2].split('/')
    file_name = file_path[len(file_path) - 1].split(".")[0]
    cv2.imwrite("day_01/%s.jpg" % file_name, write_img)
    
    # visualize
    show_plt(write_img, 30, 10)


Week 3 Homework

【AI Training Cohort 3 homework content thread】

import requests
import time
import json
import re
import random


def parse_cookies(cookie_str):
    """
    Parse a cookie string into a dict.
    """
    cookies = {}
    for line in cookie_str.split(';'): 
        key, value = line.split('=', 1) 
        cookies[key] = value.encode("utf-8").decode("latin1")
    return cookies


def fetch_url(url, headers, cookies):
    """
    Request a URL and return its content.
    """
    try:
        resp = requests.get(url, headers=headers, cookies=cookies)
        resp.raise_for_status()
        return resp.content  # return the page content
    except Exception as e:
        print(e)

def get_resp_content(url, headers, cookies):
    content = fetch_url(url, headers, cookies)
    return content.decode('utf-8')
    
        
def parse_content_json(content_json, element_name):
    """
    Parse the JSON payload.
    """
    value_list = []
    data_list = json.loads(content_json)
    for data in data_list:
        # Baidu image URL field: "thumbURL"
        element_value = data.get(element_name)
        if element_value:
            value_list.append(element_value)
    return value_list


def re_search_one(content, regex, output_group):
    """
    Regex search; return the first match, formatted via output_group.
    """
    # for Baidu images: r'.*"data":(.*)}'
    searchObj = re.search(regex, content, re.M|re.I)
    content_group = output_group % searchObj.group(1)
    return content_group


def download_resource(url_list, file_name_regex, save_dir, file_suffix):
    """
    Download every file in the URL list.
    """
    for url in url_list:
        file_name = re_search_one(url, file_name_regex, "%s")  # extract the file name
        file_path = "%s/%s%s" % (save_dir, file_name, file_suffix)  # build the save path
        resource = fetch_url(url, None, None)  # fetch the resource
        with open(file_path, "wb") as w:  # persist locally
            w.write(resource)

        wait_second = random.randint(2, 8)
        time.sleep(wait_second)  # wait
        print("wait second: %s ; save file: %s" % (wait_second, file_path))

cookie_str = r'$cookie'
headers = {'User-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'}
resp_content_regex = r'.*"data":(.*)'  # regex for extracting the payload from the response
output_group = "%s]}]"
file_name_regex = r'.*u=(.*)&fm=.*'  # regex for the file name
element_name = "thumbURL"  # field to extract from the response
file_suffix = ".jpg"  # file extension
save_dir = "$save_dir"  # directory to save files into

i = 1
num = 30
while i < 2000:
    url = "https://image.baidu.com/search/acjson?tn=resultjson_com&logid=6690820716424463597&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E5%85%85%E6%B0%94%E6%8B%B1%E9%97%A8&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=&word=%E5%85%85%E6%B0%94%E6%8B%B1%E9%97%A8&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn={}&rn={}&gsm=3c&1622605320685=".format(i, num)

    print(url)
    # parse the cookie string into a dict
    cookies = parse_cookies(cookie_str)
    # request the url and fetch the data
    content = get_resp_content(url, headers, cookies)
    # regex-filter the response to get the JSON payload
    re_result = re_search_one(content, resp_content_regex, output_group)
    # parse the JSON and collect the field values
    value_list = parse_content_json(re_result, element_name)
    # download the files in the list
    download_resource(value_list, file_name_regex, save_dir, file_suffix)
    
    i += num
    time.sleep(random.randint(10, 40))
Week 4 Homework

[image attachment]

Week 5 Homework

【Assignment requirements】

https://git-core.megvii-inc.com/ai_train/ai-xionglei/-/tree/master/week_05

许建亮:

Week 1 Homework

1. Statistics output
Max width 1143, height 1600
Min width 107, height 150
Mean width 400.01485148514854, height 499.2524752475247

2. Visualization

[image attachment]

3. Code

from meghair.utils import io
from meghair.utils.imgproc import imdecode
from refile import smart_sync
from IPython.display import Image as IMG
from matplotlib import pyplot as plt  # needed by show_plt below

import cv2
import boto3
import refile
import nori2 as nori
import numpy as np
import imgaug.augmenters as iaa

W = 128
H = 128
host = "http://oss.i.brainpp.cn"

# Augmentation pipeline
seq = iaa.Sequential([
    iaa.Crop(px=(0, 128)),
    iaa.Affine(rotate=(-45, 45)),
    iaa.Resize({"height": H, "width": W})
], random_order=True)

def get_nori_list():
    # sync the data file to local disk (bucket name per the other submissions: ai-cultivate)
    smart_sync('s3://ai-cultivate/1percent_ImageNet.txt', '/home/xujianliang/1.txt')
    # open the local file and read the nori records
    f = open('/home/xujianliang/1.txt')
    lines = f.read()

    # convert the txt content to a list
    nori_list = []
    nori_txt = lines.split("\n")
    for pic_info in nori_txt:
        if pic_info != '':
            nori_list.append(pic_info.split("\t"))
    return nori_list
    
def show_plt(img):
    """
    img: numpy.ndarray  an image as loaded by cv2.imread
    """
    print(type(img))
    img2 = img[:, :, ::-1]  # BGR -> RGB
    plt.imshow(img2)
    plt.show()

def get_statistics(image_w, image_h):
    """
    Print summary statistics.
    """
    print("Max width {}, height {}".format(max(image_w), max(image_h)))
    print("Min width {}, height {}".format(min(image_w), min(image_h)))
    print("Mean width {}, height {}".format(np.mean(image_w), np.mean(image_h)))
    
def process_image(images):
    image_w = []
    image_h = []
    # read the nori image files
    fetcher = nori.Fetcher()
    for i, item in enumerate(images):
        nori_id = item[0]
        img = imdecode(fetcher.get(nori_id))
        image_w.append(img.shape[1])  # shape[1] is the width
        image_h.append(img.shape[0])  # shape[0] is the height
        images_batch = np.array([img] * 5, dtype=np.uint8)
        write_img = np.zeros(shape=(H, (W + 10) * 5, 3), dtype=np.uint8)
        # augment the batch
        images_aug = seq(images=images_batch)
        for j, img_aug in enumerate(images_aug):
            write_img[:, j * (W + 10): j * (W + 10) + W, :] = img_aug
            cv2.imwrite("/home/xujianliang/aitest/{}_".format(i) + item[2].split('/')[1], img_aug)
        show_plt(write_img)
        # only process the first 20-odd entries
        if i > 20:
            break
    return image_w, image_h

nori_list = get_nori_list()
image_w, image_h = process_image(nori_list)
get_statistics(image_w, image_h)
Week 3 Homework

import re
import urllib.request
from urllib import request

Fetch the top-level HTML

def getSuperHtmlCode(url):
    print('start-getsuperhtml')
    with request.urlopen(url) as f:
        data = f.read()
        print('Status:', f.status, f.reason)
        for k, v in f.getheaders():
            print('%s: %s' % (k, v))
        print('Data:', data.decode('utf-8'))
        return data

Fetch the page HTML

def getHtml(url):
    print('start-gethtml')
    page = urllib.request.urlopen(url)  # urllib.request.urlopen() opens a URL
    html = page.read()  # read() reads the data at that URL
    return html

def getImg(html):
    reg = r'src="(.+?\.jpg)" pic_ext'  # regex that captures image addresses
    imgre = re.compile(reg)  # re.compile() turns the regex into a pattern object
    html = html.decode('utf-8')  # python3
    imglist = re.findall(imgre, html)
    x = 0

    for imgurl in imglist:
        urllib.request.urlretrieve(imgurl, r'D:\gongmen\%s.jpg' % x)
        x += 1

html = getSuperHtmlCode("https://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&word=充气拱门")
print(html)
print(getImg(html))

Week 5 Homework

https://git-core.megvii-inc.com/ai_train/ai-xujianliang/-/tree/master/week_05

殷邦忠:

Week 1

[image attachment]

Week 3

# -*- coding: utf-8 -*-

import requests, time, os

# Adapted from: https://cloud.tencent.com/developer/article/1689127

data = input('充气拱门')

page = eval(input('1'))

str_table = {
    '_z2C$q': ':',
    '_z&e3B': '.',
    'AzdH3F': '/'
}
char_table = {
    'w': 'a',
    'k': 'b',
    'v': 'c',
    '1': 'd',
    'j': 'e',
    'u': 'f',
    '2': 'g',
    'i': 'h',
    't': 'i',
    '3': 'j',
    'h': 'k',
    's': 'l',
    '4': 'm',
    'g': 'n',
    '5': 'o',
    'r': 'p',
    'q': 'q',
    '6': 'r',
    'f': 's',
    'p': 't',
    '7': 'u',
    'e': 'v',
    'o': 'w',
    '8': '1',
    'd': '2',
    'n': '3',
    '9': '4',
    'c': '5',
    'm': '6',
    '0': '7',
    'b': '8',
    'l': '9',
    'a': '0'
}
char_table = {ord(key): ord(value) for key, value in char_table.items()}

def decode(url):
    # first replace the multi-character tokens
    for key, value in str_table.items():
        url = url.replace(key, value)
    # then translate the remaining single characters
    return url.translate(char_table)
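
For reference, a quick sanity check of decode; the input below is a made-up fragment, not a real objURL, but it exercises both tables ('_z2C$q' -> ':', 'AzdH3F' -> '/', then the character map turns 'ippr' into 'http'):

print(decode('ippr_z2C$qAzdH3FAzdH3F'))  # prints "http://"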

number = 0

def pull_image(page):
    data = '充气拱门'
    url = 'https://image.baidu.com/search/acjson?'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36',
        'Host': 'image.baidu.com',
        'Cookie': ''}

    params = {
        'tn': 'resultjson_com',
        'ipn': 'rj',
        'ct': 201326592,
        'is': '',
        'fp': 'result',
        'queryWord': data,
        'cl': 2,
        'lm': -1,
        'ie': 'utf-8',
        'oe': 'utf-8',
        'adpicid': '',
        'st': -1,
        'z': '',
        'ic': 0,
        'hd': '',
        'latest': '',
        'copyright': '',
        'word': data,
        's': '',
        'se': '',
        'tab': '',
        'width': '',
        'height': '',
        'face': 0,
        'istype': 2,
        'qc': '',
        'nc': 1,
        'fr': '',
        'expermode': '',
        'force': '',
        'cg': 'girl',
        'pn': '{}'.format(page * 30),  # paging offset
        'rn': 30,  # page size
        'gsm': '1e',
        '{}'.format(str(int(time.time() * 1000))): '',
    }  # request parameters

    res = requests.get(url, headers=headers, params=params).json(strict=False)
    list1 = [i for i in res['data']]
    print("  page: {:>10d},\tlen(list1): {}".format(page, len(list1)))
    global number
    for i in list1:
        number += 1
        if i:
            name = i['fromPageTitleEnc']
            u = decode(i['objURL'])
            print("number: {:>10d},\tname: {}".format(number, name))
            with open(os.path.join(os.getcwd(), 'pictures', '{}.jpg'.format(number)), 'wb') as f:
                f.write(requests.get(u).content)

        break  # note: as written, this stops after the first entry of each page

def do():
    for i in range(10):
        pull_image(i)

if __name__ == "__main__":
    do()

崔晋梅:

Week 1 Homework
import random
from imgaug import augmenters as iaa
from io import BytesIO
import numpy as np
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
from itertools import chain
import nori2 as nori

fetcher = nori.Fetcher()

def read_nori_image(nori_id):
    """
    读取nori中的图片信息
    """
    img = fetcher.get(nori_id)
    pil_im = Image.open(BytesIO(img))
    im_array = np.asarray(pil_im)
    return im_array            

def read_images(path):
    with open(path, "r") as f:
        return [
            (line.split()[0], line.split()[1]) for line in f.readlines() if line
        ]

def enhancement(image, seq):
    NUM = 2
    H, W, _ = image.shape
    images = np.array(
        [image] * NUM,
        dtype=np.uint8
    )
    write_img = np.zeros((H, (W+10)*(NUM+1), 3), dtype=np.uint8)
    images_aug = seq.augment_images(images)
    for i, img in enumerate(chain([image], images_aug)):
        write_img[:, i*(W+10) : i*(W+10)+W, :] = img
    return write_img

def enhancement_images(path):
    seq = iaa.Sequential([
        iaa.Crop(px=(0, 16)),  # randomly crop 0-16 px from the edges
        iaa.Affine(  # affine transform
            rotate=(-15, 15),  # rotate within ±15 degrees
            translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},  # translate within ±20%
            scale={"x": (0.6, 1.3), "y": (0.6, 1.3)},  # scale between 60% and 130%
        )
    ], random_order=True)

    for nori_id, _ in random.sample(read_images(path), 6):
        img = read_nori_image(nori_id)
        img2 = enhancement(img, seq)

        # Acquire default dots per inch value of matplotlib
        dpi = matplotlib.rcParams['figure.dpi']
        # Determine the figures size in inches to fit your image
        height, width, depth = img2.shape
        figsize = width / float(dpi) * 0.5, height / float(dpi) * 0.5
        plt.figure(figsize=figsize)
        plt.imshow(img2, aspect='auto')

    plt.show()

def stat_images(path):
    shapes = []
    for nori_id, _ in read_images(path)[:2]:  # note: only the first 2 entries are sampled
        img = read_nori_image(nori_id)
        shapes.append(img.shape)
    ws = np.array(shapes)[:, 1]
    hs = np.array(shapes)[:, 0]
    print("Mean width: {}, max width: {}, min width: {}".format(ws.mean(), ws.max(), ws.min()))
    print("Mean height: {}, max height: {}, min height: {}".format(hs.mean(), hs.max(), hs.min()))

path = "/home/cuijinmei/1percent_ImageNet.txt"
stat_images(path)
enhancement_images(path)

Week 3 Homework
import requests
import time
import json
import re
import random


def parse_cookies(cookie_str):
    """
    Parse a cookie string into a dict.
    """
    cookies = {}
    for line in cookie_str.split(';'):
        key, value = line.split('=', 1)
        cookies[key] = value.encode("utf-8").decode("latin1")
    return cookies


def fetch_url(url, headers, cookies):
    """
    Request a URL and return its content.
    """
    try:
        resp = requests.get(url, headers=headers, cookies=cookies)
        resp.raise_for_status()
        return resp.content  # return the page content
    except Exception as e:
        print(e)

def get_resp_content(url, headers, cookies):
    content = fetch_url(url, headers, cookies)
    return content.decode('utf-8')


def parse_content_json(content_json, element_name):
    """
    Parse the JSON payload.
    """
    value_list = []
    data_list = json.loads(content_json)
    for data in data_list:
        # Baidu image URL field: "thumbURL"
        element_value = data.get(element_name)
        if element_value:
            value_list.append(element_value)
    return value_list


def re_search_one(content, regex, output_group):
    """
    Regex search; return the first match, formatted via output_group.
    """
    # for Baidu images: r'.*"data":(.*)}'
    searchObj = re.search(regex, content, re.M | re.I)
    content_group = output_group % searchObj.group(1)
    return content_group


def download_resource(url_list, file_name_regex, save_dir, file_suffix):
    """
    Download every file in the URL list.
    """
    for url in url_list:
        file_name = re_search_one(url, file_name_regex, "%s")  # extract the file name
        file_path = "%s/%s%s" % (save_dir, file_name, file_suffix)  # build the save path
        resource = fetch_url(url, None, None)  # fetch the resource
        with open(file_path, "wb") as w:  # persist locally
            w.write(resource)

        wait_second = random.randint(2, 8)
        time.sleep(wait_second)  # wait
        print("wait second: %s ; save file: %s" % (wait_second, file_path))

cookie_str = r'$cookie'
headers = {'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'}
resp_content_regex = r'.*"data":(.*)'  # regex for extracting the payload from the response
output_group = "%s]}]"
file_name_regex = r'.*u=(.*)&fm=.*'  # regex for the file name
element_name = "thumbURL"  # field to extract from the response
file_suffix = ".jpg"  # file extension
save_dir = "$save_dir"  # directory to save files into

i = 1
num = 30
while i < 2000:
    url = "https://image.baidu.com/search/acjson?tn=resultjson_com&logid=6690820716424463597&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E5%85%85%E6%B0%94%E6%8B%B1%E9%97%A8&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=&word=%E5%85%85%E6%B0%94%E6%8B%B1%E9%97%A8&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn={}&rn={}&gsm=3c&1622605320685=".format(i, num)

    print(url)
    # parse the cookie string into a dict
    cookies = parse_cookies(cookie_str)
    # request the url and fetch the data
    content = get_resp_content(url, headers, cookies)
    # regex-filter the response to get the JSON payload
    re_result = re_search_one(content, resp_content_regex, output_group)
    # parse the JSON and collect the field values
    value_list = parse_content_json(re_result, element_name)
    # download the files in the list
    download_resource(value_list, file_name_regex, save_dir, file_suffix)

    i += num
    time.sleep(random.randint(10, 40))
Week 5 Homework

https://git-core.megvii-inc.com/ai_train/ai-cuijinmei/-/tree/master/week4

吕梦琪

Week 1 Homework

import cv2
import boto3
import imgaug as ia
import imgaug.augmenters as iaa
import nori2 as nori
import numpy as np
from meghair.utils import io
from meghair.utils.imgproc import imdecode
from matplotlib import pyplot as plt
%matplotlib inline

# Visualization helpers

class ImageShow():
    """
    Display images in a grid layout.

    n_rows: int   number of rows
    n_cols: int   number of columns
    figsize: list width and height of the figure in inches: [x, y]
    """
    def __init__(self, n_rows, n_cols, figsize=[6.4, 4.8]):
        self.n_rows = n_rows
        self.n_cols = n_cols
        self.fig, self.axes = plt.subplots(n_rows, n_cols, sharex=True, squeeze=False, figsize=figsize)
        self.imgs = []

    def imgs_append(self, img):
        """
        Append an image to the image list.

        img: numpy.ndarray  an image as loaded by cv2.imread
        """
        b, g, r = cv2.split(img)  # split OpenCV's default B, G, R channels
        img = cv2.merge([r, g, b])  # reorder BGR to the more common RGB
        self.imgs.append(img)

    def show_plt(self):
        """
        Display the collected images.
        """
        n_rows = self.n_rows
        n_cols = self.n_cols
        for i in range(n_rows):
            for j in range(n_cols):
                index = i * n_cols + j
                axe = self.axes[i][j]
                axe.axis("off")
                if index < len(self.imgs):
                    axe.imshow(self.imgs[index])

# End of visualization helpers

fetcher = nori.Fetcher()

ia.seed(1)
NUM = 6

# Define the imgaug augmentation pipeline
seq = iaa.Sequential([
    iaa.Crop(px=(0, 16)),
    iaa.Affine(rotate=(-25, 25))
], random_order=True)

# Download "1percent_ImageNet.txt" from OSS
host = "http://oss.i.brainpp.cn"
s3_client = boto3.client('s3', endpoint_url=host)
resp = s3_client.download_file("ai-cultivate", "1percent_ImageNet.txt", "1percent_ImageNet.txt")

img_show = ImageShow(2, 1, [20, 8])
img_heights = []
img_widths = []
for i, line in enumerate(open("1percent_ImageNet.txt")):
    # fetch a single image
    img = imdecode(fetcher.get(line.split()[0]))

    # record image height and width
    sp = img.shape  # 0: height, 1: width, 2: channels
    img_heights.append(sp[0])
    img_widths.append(sp[1])

    images = np.array(
        [img] * NUM,
        dtype=np.uint8
    )
    H = sp[0]
    W = sp[1]
    write_img = np.zeros((H, (W+10)*NUM, 3), dtype=np.uint8)
    # apply augmentation
    images_aug = seq.augment_images(images=images)
    for j, img in enumerate(images_aug):
        # tile the augmented images into one canvas
        write_img[:, j*(W+10): j*(W+10)+W, :] = img

print("Total images %s" % len(img_heights))
print("Max height %s" % int(max(img_heights)))
print("Min height %s" % int(min(img_heights)))
print("Mean height %s" % (int(sum(img_heights)) / len(img_heights)))

print("Max width %s" % int(max(img_widths)))
print("Min width %s" % int(min(img_widths)))
print("Mean width %s" % (int(sum(img_widths)) / len(img_widths)))

img_show.show_plt()

Total images 12811
Max height 5065
Min height 21
Mean height 405.4505503083288
Max width 4368
Min width 46
Mean width 471.49918039185076

Week 3 Homework

import requests
import time
import json
import re
import random

def parse_cookies(cookie_str):
    """
    Parse a cookie string into a dict.
    """
    cookies = {}
    for line in cookie_str.split(';'):
        key, value = line.split('=', 1)
        cookies[key] = value.encode("utf-8").decode("latin1")
    return cookies

def fetch_url(url, headers, cookies):
    """
    Request a URL and return its content.
    """
    try:
        resp = requests.get(url, headers=headers, cookies=cookies)
        resp.raise_for_status()
        return resp.content  # return the page content
    except Exception as e:
        print(e)

def get_resp_content(url, headers, cookies):
    content = fetch_url(url, headers, cookies)
    return content.decode('utf-8')

def parse_content_json(content_json, element_name):
    """
    Parse the JSON payload.
    """
    value_list = []
    data_list = json.loads(content_json)
    for data in data_list:
        # Baidu image URL field: "thumbURL"
        element_value = data.get(element_name)
        if element_value:
            value_list.append(element_value)
    return value_list

def re_search_one(content, regex, output_group):
    """
    Regex search; return the first match, formatted via output_group.
    """
    # for Baidu images: r'.*"data":(.*)}'
    searchObj = re.search(regex, content, re.M | re.I)
    content_group = output_group % searchObj.group(1)
    return content_group

def download_resource(url_list, file_name_regex, save_dir, file_suffix):
    """
    Download every file in the URL list.
    """
    for url in url_list:
        file_name = re_search_one(url, file_name_regex, "%s")  # extract the file name
        file_path = "%s/%s%s" % (save_dir, file_name, file_suffix)  # build the save path
        resource = fetch_url(url, None, None)  # fetch the resource
        with open(file_path, "wb") as w:  # persist locally
            w.write(resource)

        wait_second = random.randint(2, 8)
        time.sleep(wait_second)  # wait
        print("wait second: %s ; save file: %s" % (wait_second, file_path))

cookie_str = r'$cookie'
headers = {'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'}
resp_content_regex = r'.*"data":(.*)'  # regex for extracting the payload from the response
output_group = "%s]}]"
file_name_regex = r'.*u=(.*)&fm=.*'  # regex for the file name
element_name = "thumbURL"  # field to extract from the response
file_suffix = ".jpg"  # file extension
save_dir = "$save_dir"  # directory to save files into

i = 1
num = 30
while i < 2000:
    url = "https://image.baidu.com/search/acjson?tn=resultjson_com&logid=6690820716424463597&ipn=rj&ct=201326592&is=&fp=result&queryWord=充气拱门&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=&word=充气拱门&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn={}&rn={}&gsm=3c&1622605320685=".format(i, num)

    print(url)
    # parse the cookie string into a dict
    cookies = parse_cookies(cookie_str)
    # request the url and fetch the data
    content = get_resp_content(url, headers, cookies)
    # regex-filter the response to get the JSON payload
    re_result = re_search_one(content, resp_content_regex, output_group)
    # parse the JSON and collect the field values
    value_list = parse_content_json(re_result, element_name)
    # download the files in the list
    download_resource(value_list, file_name_regex, save_dir, file_suffix)

    i += num
    time.sleep(random.randint(10, 40))
Week 5 Homework

https://git-core.megvii-inc.com/ai_train/ai-lvmengqi/-/tree/master/

钟尚武:

Week 1 Homework

Week 1 Homework - 董春仪



Week 2 Homework
import os
import urllib.request

import requests

searchKey = "充气拱门"
pageSize = "1000"

url = "https://image.baidu.com/search/acjson?tn=resultjson_com&logid=6882362818101869242" \
      f"&ipn=rj&ct=201326592&is=&fp=result&queryWord={searchKey}" \
      "&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=" \
      "&word=%E5%85%85%E6%B0%94%E6%8B%B1%E9%97%A8&s=&se=&tab=&width=&height=&face=0" \
      f"&istype=2&qc=&nc=1&fr=&expermode=&nojc=&pn=30&rn={pageSize}&gsm=1e&1622791541951="
imgs_path = "imgs/"
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"}

res = requests.get(url, headers=headers)
imgs_url = [item["hoverURL"] for item in res.json()["data"] if item and item.get("hoverURL")]

if not os.path.exists(imgs_path):
    os.makedirs(imgs_path)
i = 0
for img_url in imgs_url:
    # download each image and save it into the folder
    image_name = f"{imgs_path}/{i}.jpg"
    urllib.request.urlretrieve(img_url, filename=image_name)
    i += 1
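
Note that pn is fixed at 30 in the URL above, so only one page is fetched even though rn is set to 1000 (Baidu generally caps the per-request count far below that). A hedged paging sketch, reusing the url, headers, and counter i from above; the 30-per-page step and the string substitution on pn are assumptions:

for pn in range(0, 300, 30):  # assumed paging step of 30
    page_res = requests.get(url.replace("pn=30", f"pn={pn}"), headers=headers)
    for item in page_res.json()["data"]:
        if item and item.get("hoverURL"):
            urllib.request.urlretrieve(item["hoverURL"], filename=f"{imgs_path}/{i}.jpg")
            i += 1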