#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Non-template structuring of exam papers from wordbin HTML output.

NOTE(review): this file was recovered from a mangled extraction — newlines
were collapsed and any span matching an HTML tag (``<img ...>`` regex
literals, the head of :meth:`WordParseStructure.structure`) was stripped.
Sections marked ``TODO(review)`` are best-effort reconstructions and must be
confirmed against version control before being trusted.
"""
from pprint import pprint
from typing import Any

# from utils.exam_type import get_exam_type
# from utils.get_data import Mongo
from structure.final_structure import one_item_structure
from utils.stem_ans_split import get_split_pos
# Star import is relied on for: configs, re, os, wash_after, stem_ans_split,
# ans_block_split, insert_sort2get_idx (presumably — TODO confirm).
from utils.washutil import *
from utils.washutil_for_DL_way import HtmlWash_2
# Star import is relied on for: split_by_keywords, split_by_topicno
# (presumably — TODO confirm).
from structure.three_parse_structure import *
from utils.pic_pos_judge import img_regroup
from func_timeout import func_set_timeout
import requests
from structure.ans_structure import get_ans_match
from utils.xuanzuoti2slave import toslave_bef, toslave_aft

logger = configs.myLog(__name__, log_cate="reparse_ruku_log").getlog()

# Index 0: stems and answers in separate blocks; index 1: neither/both of the
# {答案}/{解析} keywords; index 2: teacher's copy with answer+analysis keywords.
paper_types = ["第三种试卷格式:题目与答案分开",
               "第二种试卷格式: 不同时含有或都不含有{答案}和{解析}关键字",
               "第一种试卷格式:教师用卷,含答案和解析关键字"]


class WordParseStructure:
    """Non-template structuring of a paper, based on wordbin's HTML result."""

    def __init__(self, html, wordid, is_reparse=0, must_latex=0, source="zxhx", subject="数学"):
        # Raw HTML of the paper as produced by wordbin.
        self.html = html
        # 1 when re-parsing an already ingested paper (changes return payload).
        self.is_reparse = is_reparse
        # Paper id, used in logs and as the segmentation-service request id.
        self.wordid = wordid
        self.must_latex = must_latex
        # Caller channel; "school" papers skip the DL segmentation service.
        self.source = source
        self.subject = subject

    def __call__(self):
        """Entry point: try the DL topic-segmentation path, fall back to rules.

        Returns a 2-tuple ``(result_dict, paper_type)``.
        """
        if self.source not in ["school"]:  # e.g. "xue_guan", "teacher"
            res = self.structure_combine_DL()
            # NOTE(review): only an *empty* first element triggers the
            # rule-based fallback; error dicts ({"errcode": 1, ...}) are
            # truthy and are returned as-is — presumably intentional.
            if not res[0]:
                return self.structure()
            logger.info("----【paper_id:{}】采用切题服务".format(self.wordid))
            return res
        else:
            return self.structure()

    def structure_combine_DL(self):
        """Structure the paper using the remote topic-segmentation service.

        Returns ``(result_dict, paper_type)``; ``result_dict`` is ``{}`` when
        the service produced nothing (caller falls back to :meth:`structure`),
        or ``{"errcode": 1, ...}`` on hard failures.
        """
        # Step 1: clean the HTML into plain rows.
        htmltext, row_list, new_html = HtmlWash_2(
            self.html, self.wordid, self.is_reparse,
            must_latex=self.must_latex).html_cleal()
        if not row_list:
            return {"errcode": 1, "errmsgs": "题文没有有效信息", "data": {}}, ""

        # Step 2: locate the stem/answer split point — requires an explicit
        # "答案" marker; a str result is an error message.
        split_res = get_split_pos(row_list)
        if isinstance(split_res, str):
            return {"errcode": 1, "errmsgs": split_res, "data": {}}, paper_types[0]
        row_list, items_list, ans_list, _ = split_res

        rd1_may_fail = 0  # set when the stem block itself embeds answer keys
        paper_type = ""
        item_res = {}
        if "【答案】" in "".join(items_list) or "【解析】" in "".join(items_list):
            rd1_may_fail = 1
        elif items_list:
            paper_type = paper_types[0]  # stems and answers are separate blocks
            try:
                r1 = requests.post(url=configs.topic_segment_ip,
                                   json={"content": "\n".join(items_list),
                                         "subject": self.subject,
                                         "paper_id": self.wordid,
                                         "text_type": "stem_block"})
                item_res = r1.json()["res"]
                r2 = requests.post(url=configs.topic_segment_ip,
                                   json={"content": "\n".join(ans_list),
                                         "subject": self.subject,
                                         "paper_id": self.wordid,
                                         "text_type": "answer_block"})
                all_ans, ans_no = r2.json()["res"]
                print(ans_no)
                # Repair mis-split answers using ans_no, which may look like
                # [2, 6, 4, None, 7, None, 5, None, 1]: a None entry means the
                # fragment belongs to the previous numbered answer.
                if abs(len([i for i in ans_no if i]) - len(item_res)) <= 2:
                    last_idx = None
                    new_ans_no = ans_no.copy()
                    for i, no in enumerate(ans_no):
                        if no is not None:
                            last_idx = i
                        if i > 0 and no is None and last_idx is not None:
                            all_ans[last_idx] += "\n" + all_ans[i]
                            all_ans[i] = ""
                            new_ans_no[i] = "del"
                    all_ans = [j for j in all_ans if j]
                    ans_no = [i for i in new_ans_no if i != 'del']
                if abs(len(ans_no) - len(item_res)) > 2:
                    # Counts diverge too much: fall back to block splitting.
                    item_res = ans_block_split(ans_list, item_res)
                else:
                    item_res = get_ans_match(item_res, all_ans, ans_no, {}, 'model_split')
            except Exception as e:
                logger.info("----【paper_id:{}】切题服务异常:{}".format(self.wordid, e))
        else:
            rd1_may_fail = 1

        if rd1_may_fail:
            # Teacher-style paper: answers are inlined after each stem, so
            # segment the whole cleaned text and split each item locally.
            try:
                r3 = requests.post(url=configs.topic_segment_ip,
                                   json={"content": htmltext,
                                         "subject": self.subject,
                                         "paper_id": self.wordid,
                                         "text_type": "stem_block"})
                item_res = r3.json()["res"]
                for one_res in item_res:
                    if re.search(r'\n【(答案|[解分][析答]|详解|点[评睛]|考点|专题)】', one_res["stem"]):
                        case = "case1"  # default: has an explicit "答案" keyword
                        if re.search(r'\n【答案】|[\n】]\s*答案\s*[::]', one_res["stem"]) is None:
                            case = "case0"  # no "答案" keyword
                        dd1 = stem_ans_split(one_res, case)  # refine the split per item
                        one_res["stem"] = dd1["stem"]
                        del dd1["stem"]
                        one_res.update(dd1)
                    else:
                        # No inline analysis for this item.
                        one_res.update({"key": "", "parse": ""})
            except Exception as e:
                logger.info("----【paper_id:{}】切题服务异常:{}".format(self.wordid, e))

        # ========== per-item structuring ==========
        if item_res:
            # Guarantee the answer/analysis fields exist on every item.
            for i, one_item in enumerate(item_res):
                if 'key' not in one_item:
                    item_res[i]['key'] = ""
                if 'parse' not in one_item:
                    item_res[i]['parse'] = ""
            consumer = ['noslave'] * len(item_res)
            items_no_type = [1] * len(item_res)
            xyz = zip(item_res, consumer, items_no_type)
            # map() is fast enough here compared with multiprocessing.
            res = list(map(one_item_structure, xyz))
            # ========== final wash ==========
            res = wash_after(res, self.subject)
            if self.is_reparse:
                return {"html": new_html, "items": res}, paper_type
            else:
                return {"items": res}, paper_type
        else:
            return {}, paper_type

    def img_repl(self, one_dict):
        """Restore image tags in a roughly-split item.

        TODO(review): the regex literals of this method contained raw
        ``<img ...>`` patterns that were destroyed by an HTML-tag filter
        during extraction; the implementation below is a minimal, hedged
        reconstruction (map each ``<img>`` tag back through
        ``self.subs2src``) — restore the original from version control.
        """
        imgs = {s: re.findall(r"<img.*?>", one_dict[s]) for s in ['stem', 'key', 'parse']}
        for field, imgs_seq in imgs.items():
            for img in imgs_seq:
                one_dict[field] = one_dict[field].replace(img, self.subs2src.get(img, img))
        if "analy" in one_dict:
            for img in re.findall(r"<img.*?>", one_dict["analy"]):
                one_dict["analy"] = one_dict["analy"].replace(img, self.subs2src.get(img, img))
        return one_dict

    def structure(self):
        """Rule-based (template) structuring fallback.

        TODO(review): the first half of this method (HTML washing,
        ``self.subs2src`` construction, the keyword-count condition) was lost
        in extraction and is reconstructed minimally here; the tail below the
        ``> 10`` condition is faithful to the recovered source.
        """
        # --- reconstructed head: wash HTML into rows (TODO confirm washer) ---
        htmltext, row_list, new_html = HtmlWash_2(
            self.html, self.wordid, self.is_reparse,
            must_latex=self.must_latex).html_cleal()
        if not row_list:
            return {"errcode": 1, "errmsgs": "题文没有有效信息", "data": {}}, ""
        self.subs2src = {}  # TODO(review): original substitution map lost
        item_no_type = 1
        joined = "".join(row_list)
        # --- recovered tail starts here ---
        # "带相同个数的答案和解析": matching counts of answer and analysis markers.
        if joined.count("【答案】") > 10 and joined.count("【解析】") > 10:  # TODO(review): confirm condition
            paper_type = paper_types[2]
            item_res = split_by_keywords(row_list)
            if isinstance(item_res, str) and re.search("格式有误|没有换行|题型不明确|题型行格式有问题", item_res):
                print("第一种试卷格式解析格式有误")
                try:
                    paper_type = paper_types[1]
                    item_res = split_by_topicno(row_list)
                except Exception:
                    return {"errcode": 1, "errmsgs": item_res, "data": {}}, paper_type
        else:
            paper_type = paper_types[1]
            item_res = split_by_topicno(row_list)
        if isinstance(item_res, str):
            return {"errcode": 1, "errmsgs": item_res, "data": {}}, paper_type

        item_list = item_res
        if isinstance(item_res, tuple):
            item_list, item_no_type = item_res
        print('****************初步切分题目的个数*****************', len(item_list))

        res = []
        if item_list:
            item_list = img_regroup(item_list, row_list)  # regroup split images
            if self.subs2src:
                item_list = list(map(self.img_repl, item_list))  # restore image tags
            # ========== per-item structuring ==========
            consumer = ['toslave'] * len(item_list)
            items_no_type = [item_no_type] * len(item_list)
            xyz = zip(item_list, consumer, items_no_type)
            # map() is fast enough here compared with multiprocessing.
            res = list(map(one_item_structure, xyz))
            # ========== final wash ==========
            res = wash_after(res)
        if self.is_reparse:
            return {"html": new_html, "items": res}, paper_type
        else:
            return {"items": res}, paper_type

    @staticmethod
    def _get_all_errors(res):
        """Collect all per-item errors of a structured paper into one list.

        Result shape: ``[{"单选题第1题目": [...]}, {"解答题第2题": [...]}, ...]``.
        """
        type_names = []
        errmgs = []
        spliterr_point = []
        for one_res in res:
            type_names.append(one_res["type"])
            if "text_errmsgs" in one_res:
                errmgs.append(one_res["text_errmsgs"])
            else:
                errmgs.append("")
            if 'spliterr_point' in one_res:
                spliterr_point.append(one_res['spliterr_point'])
        # Re-number items that share the same question type.
        new_names = []
        for k, v in enumerate(type_names):
            if v:
                nums = str(type_names[:k]).count(v)
            else:
                nums = k
            if spliterr_point:
                add_n = insert_sort2get_idx(spliterr_point, k + 1)
                new_names.append("{}第{}题(在整份word中的序号为{}题)".format(v, nums + 1 + add_n, k + 1 + add_n))
            else:
                new_names.append("{}第{}题(在整份word中的序号为{}题)".format(v, nums + 1, k + 1))
        all_errors = []
        for name, error in zip(new_names, errmgs):
            if len(error) > 0:
                all_errors.append({name: error})
        return all_errors


if __name__ == '__main__':
    # Single-paper smoke test (dead commented-out experiments removed; see VCS
    # history for the old fixture paths and the embedded HTML sample).
    path2 = r"F:\zwj\Text_Structure\accept_files\664597dd71453ba19c20977f.html"
    html = open(path2, "r", encoding="utf-8").read()
    res1 = WordParseStructure(html, "664597dd71453ba19c20977f",
                              is_reparse=0, must_latex=0,
                              source="ai", subject="物理")()
    pprint(res1[0]['items'])
    print('题目数量:', len(res1[0]["items"]))