#!/usr/bin/env python
  2. # -*- coding:utf-8 -*-
  3. from pprint import pprint
  4. # from utils.exam_type import get_exam_type
  5. from structure.final_structure import one_item_structure
  6. from utils.stem_ans_split import get_split_pos
  7. from utils.washutil import *
  8. from structure.three_parse_structure import *
  9. from utils.pic_pos_judge import img_regroup
  10. from structure.paper_text_structure import WordParseStructure
  11. from func_timeout import func_set_timeout
  12. from utils.xuanzuoti2slave import toslave_bef, toslave_aft
  13. paper_types = ["第三种试卷格式:题目与答案分开",
  14. "第二种试卷格式: 不同时含有或都不含有{答案}和{解析}关键字",
  15. "第一种试卷格式:教师用卷,含答案和解析关键字"]
  16. class StructureExporter(WordParseStructure):
  17. """
  18. 基于wordbin出来的html结果进一步做 试卷类型 非模板结构化
  19. """
  20. def img_repl(self, one_dict):
  21. """
  22. 初步拆分题目后,图片信息的替换
  23. :return:
  24. """
  25. imgs = {s: re.findall("<img.*?/>", one_dict[s]) for s in ['stem', 'key', 'parse', 'com_stem'] if s in one_dict}
  26. for k, imgs_seq in imgs.items():
  27. for img in imgs_seq:
  28. img = re.sub("(?<!\s)(w_h|data-latex)=", r" \1=", img)
  29. one_dict[k] = one_dict[k].replace(img, self.subs2src[img])
  30. # if type(self.img_url) == str and self.img_url:
  31. # one_dict[k] = re.sub(r'<img src="files/', '<img src="' + str(self.img_url), str(one_dict[k]))
  32. if "analy" in one_dict:
  33. for img in re.findall("<img.*?/>", one_dict["analy"]):
  34. img = re.sub("(?<!\s)(w_h|data-latex)=", r" \1=", img)
  35. one_dict["analy"] = one_dict["analy"].replace(img, self.subs2src[img])
  36. return one_dict
  37. # @func_set_timeout(30)
  38. def export(self):
  39. """结构化入口"""
  40. if not self.row_list:
  41. return {"errcode": 1, "errmsgs": "题文没有有效信息", "data": {}}, ""
  42. # print(self.row_list)
  43. # 判断考试类型
  44. # paper_other_info = get_exam_type(row_list)
  45. # 第二步:寻找题目和答案的切分点,一定要有“答案”关键字
  46. split_res = get_split_pos(self.row_list)
  47. if type(split_res) == str:
  48. return {"errcode": 1, "errmsgs": split_res, "data": {}}, paper_types[0]
  49. row_list, items_list, ans_list, is_may_ans = split_res
  50. rd2_is_fail = 0
  51. rd1_may_fail = 0
  52. item_res, paper_type, item_no_type = "", "", 1
  53. item_groups, ans_groups = {}, {}
  54. if "【答案】" in "".join(items_list) or "【解析】" in "".join(items_list):
  55. rd1_may_fail = 1
  56. else:
  57. if items_list:
  58. paper_type = paper_types[0]
  59. reform_res = items_ans_reform(items_list, ans_list, self.subject)
  60. if type(reform_res) == str:
  61. return {"errcode": 1, "errmsgs": reform_res, "data": {}}, paper_type
  62. else:
  63. # item_res = reform_res
  64. if len(reform_res) == 2:
  65. item_res, item_no_type = reform_res
  66. else:
  67. item_res, item_no_type, rd2_is_fail, item_groups = reform_res
  68. if not items_list or rd1_may_fail or (is_may_ans and rd2_is_fail):
  69. ans_n = re.findall("【答案】", "\n".join(row_list))
  70. parse_n = len(re.findall("【解析】", "\n".join(row_list)))
  71. if self.subject not in ["地理", "语文"] and ans_n and len(ans_n) == parse_n > 10: # 带相同个数的答案和解析
  72. paper_type = paper_types[2]
  73. item_res = split_by_keywords(row_list, self.subject)
  74. if type(item_res) == str and re.search("格式有误|没有换行|题型不明确|题型行格式有问题", item_res):
  75. print("第一种试卷格式解析格式有误")
  76. try:
  77. paper_type = paper_types[1]
  78. item_res = split_by_topicno(row_list, self.subject)
  79. except:
  80. return {"errcode": 1, "errmsgs": item_res, "data": {}}, paper_type
  81. else:
  82. paper_type = paper_types[1]
  83. item_res = split_by_topicno(row_list, self.subject)
  84. item_list = []
  85. if type(item_res) == str:
  86. return {"errcode": 1, "errmsgs": item_res, "data": {}}, paper_type
  87. else:
  88. if type(item_res) == tuple:
  89. if len(item_res) == 2:
  90. item_list, item_no_type = item_res
  91. else:
  92. item_list, item_no_type, item_groups, ans_groups = item_res
  93. elif type(item_res) == list:
  94. item_list = item_res
  95. # pprint(item_list)
  96. print('****************初步切分题目的个数*****************', len(item_list))
  97. res = []
  98. if item_list:
  99. item_list = img_regroup(item_list, row_list) # 图片重组判断
  100. if self.subs2src:
  101. item_list = list(map(self.img_repl, item_list)) # 图片信息替换还原
  102. # ---------初步拆分题目错误判断--------------------
  103. # ---------新题型进一步拆分--------------------
  104. # new_item = [[k, i] for k, i in enumerate(item_list) if re.search("选[修学]", i["stem"][:10])]
  105. # have_slave = 0
  106. # to_slave = []
  107. # if new_item:
  108. # try:
  109. # have_slave = 1
  110. # for one in new_item:
  111. # new_res = toslave_bef(one[1])
  112. # if type(new_res) == list:
  113. # to_slave.extend(new_res)
  114. # item_list.remove(one[1])
  115. # else:
  116. # item_list[one[0]] = new_res
  117. # except:
  118. # pass
  119. # if to_slave:
  120. # item_list.extend(to_slave)
  121. # ==========小题结构化========
  122. # from multiprocessing.dummy import Pool as ThreadPool
  123. # pool = ThreadPool(2) # 比# pool = multiprocessing.Pool(3)速度快
  124. # pprint(item_list)
  125. consumer = ['toslave'] * len(item_list) # noslave
  126. items_no_type = [item_no_type] * len(item_list)
  127. sj = [self.subject] * len(item_list)
  128. xyz = zip(item_list, consumer, items_no_type, sj, [0] * len(item_list))
  129. # res = list(pool.map(one_item_structure, xyz))
  130. res = list(map(one_item_structure, xyz)) # 和多进程相比,这样速度也很快
  131. # ==========最后的清洗=========
  132. # pprint(res)
  133. res = wash_after(res, item_groups, ans_groups, self.subject)
  134. # if have_slave and not to_slave:
  135. # res = list(map(toslave_aft, res))
  136. # 结果返回
  137. if self.is_reparse:
  138. return {"html": self.new_html, "items": res}, paper_type
  139. else:
  140. return {"items": res}, paper_type
  141. @staticmethod
  142. def _get_all_errors(res):
  143. """
  144. 整套试卷结构化完成以后,把所有报错放在一个list里面:
  145. all_errors = [{"单选题第1题目":[]},{"解答题第2题":[]},{},{}]
  146. :param res:
  147. :return:
  148. """
  149. type_names = []
  150. errmgs = []
  151. spliterr_point = []
  152. for one_res in res:
  153. type_names.append(one_res["type"])
  154. if "text_errmsgs" in one_res:
  155. errmgs.append(one_res["text_errmsgs"])
  156. else:
  157. errmgs.append("")
  158. if 'spliterr_point' in one_res:
  159. spliterr_point.append(one_res['spliterr_point'])
  160. # 给同种题型的名字重新编码
  161. new_names = []
  162. for k, v in enumerate(type_names):
  163. if v:
  164. nums = str(type_names[:k]).count(v)
  165. else:
  166. nums = k
  167. if spliterr_point:
  168. add_n = insert_sort2get_idx(spliterr_point, k+1)
  169. new_names.append("{}第{}题(在整份word中的序号为{}题)".format(v, nums + 1 + add_n, k + 1 + add_n))
  170. else:
  171. new_names.append("{}第{}题(在整份word中的序号为{}题)".format(v, nums + 1, k + 1))
  172. all_errors = []
  173. for name, error in zip(new_names, errmgs):
  174. if len(error) > 0:
  175. all_errors.append({name: error})
  176. return all_errors
  177. if __name__ == '__main__':
  178. # 单份试卷测试
  179. import json
  180. from bson.objectid import ObjectId
  181. # path1 = r"F:\zwj\parse_2021\data\fail\2\2.txt"
  182. # path = r"F:\zwj\parse_2021\res_folder\13.html"
  183. # images_url1 = "" # "http://49.233.23.58:11086/ser_static/4439/files/"
  184. # html = "<p>"+"</p>\n<p>".join(html.split("\n"))+"</p>"
  185. # with open(r"F:\zwj\Text_Structure\fail_files3\c5e222c5fbded2a2264ae002907fc92c__2021_04_16_18_43_23.json", 'r') as load_f:
  186. # html = json.load(load_f)
  187. # print(load_dict)
  188. # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\地理\3\安徽高三地理.html"
  189. # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\地理\2\gd1.html"
  190. # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\地理\shuguang.html"
  191. # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\huaxue\huexue2.html"
  192. # path2 = r"F:\zwj\new_word_text_extract_2021\data\phy_clean.html"
  193. # path2 = r"G:\zwj\Word2Html\data\yuwen\yuwen1.html"
  194. # path2 = r"F:\zwj\Text_Structure\new_tiku_structure_v3_art\data\语文\bj.html"
  195. # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\语文\2\tianjin.html"
  196. # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\语文\2\61c5380666e78ea2a20b4ff0.html"
  197. # path2 = r"G:\zwj\WL\Text_Structure\new_tiku_structure_v3_art\data\政治\jiexi_bj.html"
  198. # path2 = r"F:\zwj\Text_Structure\accept_files\62a2f9b9765759d85567a3e4.html"
  199. # 6239991e6ca622396925f66b 624cf82d12cd45a7836f3430 626b4b1f81b582c0470d01b0
  200. # 627b64b0814132f0d7b12589 627b622981b582c0470d020e
  201. # 6294326cf84c0e279ac6484e.html 62903acaf84c0e279ac647fb
  202. path2 = r"C:\Users\Administrator\Desktop\666a67fec3c4da9e7009b531.html"
  203. path2 = r"C:\Users\Administrator\Desktop\66459c62c3c4da9e7009ae9d.html"
  204. # path2 = r"F:\zwj\Text_Structure\accept_files\62aae86a765759d85567a475.html"
  205. html = open(path2, "r", encoding="utf-8").read()
  206. # html = json.loads(html) 621845626ca622396925f55c
  207. html2 = """
  208. 1. I’m anxious___________ your injury.Are you feeling any better now?
  209. 2. After he was back on his feet, he was anxious___________ (return) to school as soon as possible.
  210. 3. Helen was ___________ to death when she saw the ___________scene.She hid herself in the corner, shaking with___________(fright).
  211. 4. The music outside___________ (annoy) Tom soon. He couldn’t keep his___________ (concentrate) with such ___________ (annoy) music going on yesterday.
  212. 5. With so many people talking around, he still concentrated ___________ doing his homework,which left a deep impression on me.
  213. 6. The result was far beyond ___________ we had expected, which brought great joy to every one of us.
  214. 7. If the dress doesn’t fit you, take it back and you can exchange it ___________ another one of the same price.
  215. 8. The dictionary is out of date:many words have been added ___________ the language since it came out.
  216. 9. This vacation I went to an island on the Pacific and ___________ by its scenery. The island has left a lasting ___________ on me.
  217. 10. We are confident about the future and will never lose our confidence ___________ the achievements we will make.
  218. 11. He has worked for nearly 20 years, so he is senior ___________ most of his workmates.
  219. 12. Although he is three years junior ___________ me, he has more work experience.
  220. """
  221. res1 = StructureExporter(html, "202406131725", "语文", 1).export()
  222. # new_fpath = os.path.join(r"G:\zwj\WL\Text_Structure\fail_files", "res_政治.json")
  223. # re_f = open(new_fpath, 'w', encoding='utf-8')
  224. # json.dump(res1[0]["items"], re_f, ensure_ascii=False)
  225. # for i in res1[0]["items"]:
  226. # re_f.write(str(i))
  227. pprint(res1[0]["items"])
  228. print('题目数量:', len(res1[0]["items"]))
  229. # new_fpath = r"F:\zwj\Text_Structure\new_tiku_structure_2021\res_folder\10-28.json"
  230. # re_f = open(new_fpath, 'w', encoding='utf-8')
  231. # json.dump(res1, re_f, ensure_ascii=False)
  232. # mongo = Mongo()
  233. # data = mongo.get_data_info({"_id": ObjectId("5fc64c9c4994183dda7e75b2")})
  234. # # pprint(data["item_ocr"])
  235. # res1 = WordParseStructure(data["item_ocr"], images_url1).structure()
  236. # print(res1)
  237. # print('题目数量:', len(res1[0]["items"]))
  238. # 6837 序号有些乱 6836 图片位置和格式有问题
  239. # 6822 16A、和16B、类型的序号怎么处理 'item_id'有int和 str 型,须统一处理下
  240. # 6820 答案页没有明显标识
  241. # 14.html 只有答案,没有题干
  242. # 21.html 多套题目在一起,多个从1开始的序号,最后一道题,把后面题目都放在一起了,需要判断一下吗?
  243. # import json
  244. # re_f = open("207.txt", 'w', encoding='utf-8')
  245. # json.dump(res1[0], re_f)
  246. # json文件
  247. # for file in os.listdir(r"F:\zwj\Text_Structure\fail_files"):
  248. # path1 = os.path.join(r"F:\zwj\Text_Structure\fail_files", file)
  249. # # path1 = r"F:\zwj\Text_Structure\fail_files\89a6911f57bf89aba898651b27d2a2fc__2021_04_09_18_50_19.json"
  250. # with open(path1,'r',encoding='utf-8') as f:
  251. # html= json.load(f)
  252. # pprint(html)
  253. # # try:
  254. # # res1 = WordParseStructure(html, "").structure()
  255. # # os.remove(path1)
  256. # # except:
  257. # # pass
  258. # res1 = WordParseStructure(html, "").structure()
  259. # pprint(res1)
  260. # print('题目数量:', len(res1[0]["items"]))