#!/usr/bin/env python
# -*- coding:utf-8 -*-
import re

from pprint import pprint
# from utils.exam_type import get_exam_type
from structure.final_structure import one_item_structure
from utils.stem_ans_split import get_split_pos
from utils.washutil import *
from structure.three_parse_structure import *
from utils.pic_pos_judge import img_regroup
from structure.paper_text_structure import WordParseStructure
from func_timeout import func_set_timeout
from utils.xuanzuoti2slave import toslave_bef, toslave_aft

paper_types = ["第三种试卷格式:题目与答案分开",
               "第二种试卷格式: 不同时含有或都不含有{答案}和{解析}关键字",
               "第一种试卷格式:教师用卷,含答案和解析关键字"]


class StructureExporter(WordParseStructure):
    """
    Non-template structuring, by paper type, built on the HTML produced by wordbin.
    """

    def img_repl(self, one_dict):
        """
        After the initial question split, restore the image information.
        :return:
        """
        imgs = {s: re.findall("<img.*?/>", one_dict[s]) for s in ['stem', 'key', 'parse', 'com_stem'] if s in one_dict}
        for k, imgs_seq in imgs.items():
            for img in imgs_seq:
                img = re.sub(r"(?<!\s)(w_h|data-latex)=", r" \1=", img)
                one_dict[k] = one_dict[k].replace(img, self.subs2src[img])
                # if type(self.img_url) == str and self.img_url:
                #     one_dict[k] = re.sub(r'<img src="files/', '<img src="' + str(self.img_url), str(one_dict[k]))
        if "analy" in one_dict:
            for img in re.findall("<img.*?/>", one_dict["analy"]):
                img = re.sub(r"(?<!\s)(w_h|data-latex)=", r" \1=", img)
                one_dict["analy"] = one_dict["analy"].replace(img, self.subs2src[img])
        return one_dict
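
    # A minimal usage sketch for img_repl (hypothetical data; it assumes subs2src maps each
    # normalized <img .../> tag back to its original source tag):
    #   exporter.subs2src = {'<img src="files/1.png" w_h="120,60"/>': '<img src="http://host/files/1.png" w_h="120,60"/>'}
    #   exporter.img_repl({'stem': '如图 <img src="files/1.png" w_h="120,60"/> 所示'})
    #   -> {'stem': '如图 <img src="http://host/files/1.png" w_h="120,60"/> 所示'}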

    # @func_set_timeout(30)
    def export(self):
        """Entry point for the structuring pipeline."""
        if not self.row_list:
            return {"errcode": 1, "errmsgs": "题文没有有效信息", "data": {}}, ""
        # print(self.row_list)
        # determine the exam type
        # paper_other_info = get_exam_type(row_list)
        # step 2: find the split point between questions and answers; the "答案" keyword must be present
        split_res = get_split_pos(self.row_list)
        if isinstance(split_res, str):
            return {"errcode": 1, "errmsgs": split_res, "data": {}}, paper_types[0]
        row_list, items_list, ans_list, is_may_ans = split_res

        rd2_is_fail = 0
        rd1_may_fail = 0
        item_res, paper_type, item_no_type = "", "", 1
        item_groups, ans_groups = {}, {}
- if "【答案】" in "".join(items_list) or "【解析】" in "".join(items_list):
- rd1_may_fail = 1
- else:
- if items_list:
- paper_type = paper_types[0]
- reform_res = items_ans_reform(items_list, ans_list, self.subject)
- if type(reform_res) == str:
- return {"errcode": 1, "errmsgs": reform_res, "data": {}}, paper_type
- else:
- # item_res = reform_res
- if len(reform_res) == 2:
- item_res, item_no_type = reform_res
- else:
- item_res, item_no_type, rd2_is_fail, item_groups = reform_res
- if not items_list or rd1_may_fail or (is_may_ans and rd2_is_fail):
- ans_n = re.findall("【答案】", "\n".join(row_list))
- parse_n = len(re.findall("【解析】", "\n".join(row_list)))
- if self.subject not in ["地理", "语文"] and ans_n and len(ans_n) == parse_n > 10: # 带相同个数的答案和解析
- paper_type = paper_types[2]
- item_res = split_by_keywords(row_list, self.subject)
- if type(item_res) == str and re.search("格式有误|没有换行|题型不明确|题型行格式有问题", item_res):
- print("第一种试卷格式解析格式有误")
- try:
- paper_type = paper_types[1]
- item_res = split_by_topicno(row_list, self.subject)
- except:
- return {"errcode": 1, "errmsgs": item_res, "data": {}}, paper_type
- else:
- paper_type = paper_types[1]
- item_res = split_by_topicno(row_list, self.subject)

        item_list = []
        if isinstance(item_res, str):
            return {"errcode": 1, "errmsgs": item_res, "data": {}}, paper_type
        else:
            if isinstance(item_res, tuple):
                if len(item_res) == 2:
                    item_list, item_no_type = item_res
                else:
                    item_list, item_no_type, item_groups, ans_groups = item_res
            elif isinstance(item_res, list):
                item_list = item_res
        # pprint(item_list)
        print('****************初步切分题目的个数*****************', len(item_list))

        res = []
        if item_list:
            item_list = img_regroup(item_list, row_list)  # decide whether split images need regrouping
            if self.subs2src:
                item_list = list(map(self.img_repl, item_list))  # restore the original image tags
            # ---------- error checks on the initial question split ----------
            # ---------- further split of elective ("选修/选学") questions ----------
            # new_item = [[k, i] for k, i in enumerate(item_list) if re.search("选[修学]", i["stem"][:10])]
            # have_slave = 0
            # to_slave = []
            # if new_item:
            #     try:
            #         have_slave = 1
            #         for one in new_item:
            #             new_res = toslave_bef(one[1])
            #             if type(new_res) == list:
            #                 to_slave.extend(new_res)
            #                 item_list.remove(one[1])
            #             else:
            #                 item_list[one[0]] = new_res
            #     except:
            #         pass
            # if to_slave:
            #     item_list.extend(to_slave)

            # ========== structuring of the individual questions ==========
            # from multiprocessing.dummy import Pool as ThreadPool
            # pool = ThreadPool(2)  # faster than pool = multiprocessing.Pool(3)
            # pprint(item_list)
            consumer = ['toslave'] * len(item_list)  # noslave
            items_no_type = [item_no_type] * len(item_list)
            sj = [self.subject] * len(item_list)
            xyz = zip(item_list, consumer, items_no_type, sj, [0] * len(item_list))
            # res = list(pool.map(one_item_structure, xyz))
            res = list(map(one_item_structure, xyz))  # close to the multiprocessing version in speed

            # ========== final cleanup ==========
            # pprint(res)
            res = wash_after(res, item_groups, ans_groups, self.subject)
            # if have_slave and not to_slave:
            #     res = list(map(toslave_aft, res))

        # return the result
        if self.is_reparse:
            return {"html": self.new_html, "items": res}, paper_type
        else:
            return {"items": res}, paper_type
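
    # Return shape of export() (a sketch; the item dicts' contents depend on the parsers above):
    #   ({"items": [...]}, paper_type)                          when is_reparse is falsy
    #   ({"html": self.new_html, "items": [...]}, paper_type)   when is_reparse is truthy
    #   ({"errcode": 1, "errmsgs": "...", "data": {}}, paper_type) on failure (paper_type may be "")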

    @staticmethod
    def _get_all_errors(res):
        """
        After the whole paper has been structured, gather every error message into one list:
        all_errors = [{"单选题第1题目":[]},{"解答题第2题":[]},{},{}]
        :param res:
        :return:
        """
        type_names = []
        errmsgs = []
        spliterr_point = []
        for one_res in res:
            type_names.append(one_res["type"])
            if "text_errmsgs" in one_res:
                errmsgs.append(one_res["text_errmsgs"])
            else:
                errmsgs.append("")
            if 'spliterr_point' in one_res:
                spliterr_point.append(one_res['spliterr_point'])
        # re-number questions of the same type
        new_names = []
        for k, v in enumerate(type_names):
            if v:
                nums = str(type_names[:k]).count(v)
            else:
                nums = k
            if spliterr_point:
                add_n = insert_sort2get_idx(spliterr_point, k + 1)
                new_names.append("{}第{}题(在整份word中的序号为{}题)".format(v, nums + 1 + add_n, k + 1 + add_n))
            else:
                new_names.append("{}第{}题(在整份word中的序号为{}题)".format(v, nums + 1, k + 1))
        all_errors = []
        for name, error in zip(new_names, errmsgs):
            if len(error) > 0:
                all_errors.append({name: error})
        return all_errors
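
# A minimal sketch of what _get_all_errors returns (hypothetical input; the dicts mirror the
# per-item results produced by one_item_structure / wash_after):
#   res = [{"type": "单选题", "text_errmsgs": "缺少答案"}, {"type": "单选题", "text_errmsgs": ""}]
#   StructureExporter._get_all_errors(res)
#   -> [{"单选题第1题(在整份word中的序号为1题)": "缺少答案"}]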


if __name__ == '__main__':
    # test on a single paper
    import json
    from bson.objectid import ObjectId

    # path1 = r"F:\zwj\parse_2021\data\fail\2\2.txt"
    # path = r"F:\zwj\parse_2021\res_folder\13.html"
    # images_url1 = ""  # "http://49.233.23.58:11086/ser_static/4439/files/"
    # html = "<p>" + "</p>\n<p>".join(html.split("\n")) + "</p>"
    # with open(r"F:\zwj\Text_Structure\fail_files3\c5e222c5fbded2a2264ae002907fc92c__2021_04_16_18_43_23.json", 'r') as load_f:
    #     html = json.load(load_f)
    # print(load_dict)
    # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\地理\3\安徽高三地理.html"
    # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\地理\2\gd1.html"
    # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\地理\shuguang.html"
    # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\huaxue\huexue2.html"
    # path2 = r"F:\zwj\new_word_text_extract_2021\data\phy_clean.html"
    # path2 = r"G:\zwj\Word2Html\data\yuwen\yuwen1.html"
    # path2 = r"F:\zwj\Text_Structure\new_tiku_structure_v3_art\data\语文\bj.html"
    # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\语文\2\tianjin.html"
    # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\语文\2\61c5380666e78ea2a20b4ff0.html"
    # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\政治\jiexi_bj.html"
    # path2 = r"F:\zwj\Text_Structure\accept_files\62a2f9b9765759d85567a3e4.html"
    # 6239991e6ca622396925f66b 624cf82d12cd45a7836f3430 626b4b1f81b582c0470d01b0
    # 627b64b0814132f0d7b12589 627b622981b582c0470d020e
    # 6294326cf84c0e279ac6484e.html 62903acaf84c0e279ac647fb
    path2 = r"C:\Users\Administrator\Desktop\666a67fec3c4da9e7009b531.html"
    path2 = r"C:\Users\Administrator\Desktop\66459c62c3c4da9e7009ae9d.html"
    # path2 = r"F:\zwj\Text_Structure\accept_files\62aae86a765759d85567a475.html"
    html = open(path2, "r", encoding="utf-8").read()
    # html = json.loads(html)  # 621845626ca622396925f55c
    html2 = """
    1. I’m anxious___________ your injury.Are you feeling any better now?
    2. After he was back on his feet, he was anxious___________ (return) to school as soon as possible.
    3. Helen was ___________ to death when she saw the ___________scene.She hid herself in the corner, shaking with___________(fright).
    4. The music outside___________ (annoy) Tom soon. He couldn’t keep his___________ (concentrate) with such ___________ (annoy) music going on yesterday.
    5. With so many people talking around, he still concentrated ___________ doing his homework,which left a deep impression on me.
    6. The result was far beyond ___________ we had expected, which brought great joy to every one of us.
    7. If the dress doesn’t fit you, take it back and you can exchange it ___________ another one of the same price.
    8. The dictionary is out of date:many words have been added ___________ the language since it came out.
    9. This vacation I went to an island on the Pacific and ___________ by its scenery. The island has left a lasting ___________ on me.
    10. We are confident about the future and will never lose our confidence ___________ the achievements we will make.
    11. He has worked for nearly 20 years, so he is senior ___________ most of his workmates.
    12. Although he is three years junior ___________ me, he has more work experience.
    """
    res1 = StructureExporter(html, "202406131725", "语文", 1).export()
    # new_fpath = os.path.join(r"G:\zwj\WL\Text_Structure\fail_files", "res_政治.json")
    # re_f = open(new_fpath, 'w', encoding='utf-8')
    # json.dump(res1[0]["items"], re_f, ensure_ascii=False)
    # for i in res1[0]["items"]:
    #     re_f.write(str(i))
    pprint(res1[0]["items"])
    print('题目数量:', len(res1[0]["items"]))

    # new_fpath = r"F:\zwj\Text_Structure\new_tiku_structure_2021\res_folder\10-28.json"
    # re_f = open(new_fpath, 'w', encoding='utf-8')
    # json.dump(res1, re_f, ensure_ascii=False)

    # mongo = Mongo()
    # data = mongo.get_data_info({"_id": ObjectId("5fc64c9c4994183dda7e75b2")})
    # # pprint(data["item_ocr"])
    # res1 = WordParseStructure(data["item_ocr"], images_url1).structure()
    # print(res1)
    # print('题目数量:', len(res1[0]["items"]))

    # notes on problem files:
    # 6837: question numbering is somewhat messy; 6836: image position and format problems
    # 6822: how to handle numbers such as 16A、 and 16B、; 'item_id' mixes int and str and needs unifying
    # 6820: the answer section has no clear marker
    # 14.html: only answers, no question stems
    # 21.html: several papers concatenated, with several numbering sequences starting from 1; the last
    #          question swallows everything after it; should that case be detected?

    # import json
    # re_f = open("207.txt", 'w', encoding='utf-8')
    # json.dump(res1[0], re_f)
    # JSON files
    # for file in os.listdir(r"F:\zwj\Text_Structure\fail_files"):
    #     path1 = os.path.join(r"F:\zwj\Text_Structure\fail_files", file)
    #     # path1 = r"F:\zwj\Text_Structure\fail_files\89a6911f57bf89aba898651b27d2a2fc__2021_04_09_18_50_19.json"
    #     with open(path1, 'r', encoding='utf-8') as f:
    #         html = json.load(f)
    #     pprint(html)
    #     # try:
    #     #     res1 = WordParseStructure(html, "").structure()
    #     #     os.remove(path1)
    #     # except:
    #     #     pass
    #     res1 = WordParseStructure(html, "").structure()
    #     pprint(res1)
    #     print('题目数量:', len(res1[0]["items"]))