From cf63cecd1a73bbcfec20694d6109fbd6c3db4a15 Mon Sep 17 00:00:00 2001 From: Michael Jones Date: Sat, 13 May 2023 18:13:48 +0100 Subject: [PATCH 01/65] Fix mistake in spelling of inheritance As pointed out by @kielbasi. --- breathe/parser/compoundsuper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/breathe/parser/compoundsuper.py b/breathe/parser/compoundsuper.py index 52b8be9e..79050edd 100644 --- a/breathe/parser/compoundsuper.py +++ b/breathe/parser/compoundsuper.py @@ -484,7 +484,7 @@ def buildChildren(self, child_, nodeName_): elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'inheritancegraph': obj_ = graphType.factory( - caption=f"Inheritence diagram for {self.get_compoundname()}:" + caption=f"Inheritance diagram for {self.get_compoundname()}:" ) obj_.build(child_) self.set_inheritancegraph(obj_) From 15a0fe9cd1396eb08a2d48fbb1ba4967aed79cbc Mon Sep 17 00:00:00 2001 From: jce Date: Mon, 19 Jun 2023 17:02:37 +0200 Subject: [PATCH 02/65] Support member references in compound --- breathe/parser/compoundsuper.py | 52 ++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/breathe/parser/compoundsuper.py b/breathe/parser/compoundsuper.py index 52b8be9e..47addbea 100644 --- a/breathe/parser/compoundsuper.py +++ b/breathe/parser/compoundsuper.py @@ -513,6 +513,48 @@ def buildChildren(self, child_, nodeName_): # end class compounddefType +class MemberType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, refid=None, name=None): + self.kind = kind + self.refid = refid + self.name = name + def factory(*args_, **kwargs_): + if MemberType.subclass: + return MemberType.subclass(*args_, **kwargs_) + else: + return MemberType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def hasContent_(self): + return self.name is not None + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + name_ = '' + for text__content_ in child_.childNodes: + name_ += text__content_.nodeValue + self.name = name_ +# end class MemberType + + class listofallmembersType(GeneratedsSuper): subclass = None superclass = None @@ -989,7 +1031,7 @@ def buildChildren(self, child_, nodeName_): class sectiondefType(GeneratedsSuper): subclass = None superclass = None - def __init__(self, kind=None, header=None, description=None, memberdef=None): + def __init__(self, kind=None, header=None, description=None, memberdef=None, member=None): self.kind = kind self.header = header self.description = description @@ -997,6 +1039,10 @@ def __init__(self, kind=None, header=None, description=None, memberdef=None): self.memberdef = [] else: self.memberdef = memberdef + if member is None: + self.member = [] + else: + self.member = member def factory(*args_, **kwargs_): if sectiondefType.subclass: return sectiondefType.subclass(*args_, **kwargs_) @@ -1011,6 
+1057,10 @@ def get_memberdef(self): return self.memberdef def set_memberdef(self, memberdef): self.memberdef = memberdef def add_memberdef(self, value): self.memberdef.append(value) def insert_memberdef(self, index, value): self.memberdef[index] = value + def get_member(self): return self.member + def set_member(self, member): self.member = member + def add_member(self, value): self.member.append(value) + def insert_member(self, index, value): self.member[index] = value def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def hasContent_(self): From d7160fe6f747a51944274fe607bfd5cac98cbbe6 Mon Sep 17 00:00:00 2001 From: jce Date: Tue, 20 Jun 2023 13:20:39 +0200 Subject: [PATCH 03/65] Store member nodes --- breathe/parser/compoundsuper.py | 34 +++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/breathe/parser/compoundsuper.py b/breathe/parser/compoundsuper.py index 47addbea..006b249b 100644 --- a/breathe/parser/compoundsuper.py +++ b/breathe/parser/compoundsuper.py @@ -1082,22 +1082,24 @@ def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'header': - header_ = '' - for text__content_ in child_.childNodes: - header_ += text__content_.nodeValue - self.header = header_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'description': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_description(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'memberdef': - obj_ = memberdefType.factory() - obj_.build(child_) - self.memberdef.append(obj_) + if child_.nodeType == Node.ELEMENT_NODE: + if nodeName_ == 'header': + header_ = '' + for text__content_ in child_.childNodes: + header_ += text__content_.nodeValue + self.header = header_ + elif nodeName_ == 'description': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_description(obj_) + elif nodeName_ == 'memberdef': + obj_ = memberdefType.factory() + obj_.build(child_) + self.memberdef.append(obj_) + elif nodeName_ == 'member': + obj_ = MemberType.factory() + obj_.build(child_) + self.member.append(obj_) # end class sectiondefType From 6ef8cf061c0d46d71daee6734f8b9e559e75fa7e Mon Sep 17 00:00:00 2001 From: jce Date: Wed, 21 Jun 2023 17:45:29 +0200 Subject: [PATCH 04/65] Support member references in compound --- breathe/finder/compound.py | 40 +++++++++++++++++++++++++++++++++++++- breathe/finder/factory.py | 12 +++++++++++- breathe/finder/index.py | 24 ++++++++++------------- breathe/parser/compound.py | 14 +++++++++++-- breathe/renderer/filter.py | 14 ++++++------- 5 files changed, 79 insertions(+), 25 deletions(-) diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index e38d2c6b..ed3df361 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -1,5 +1,13 @@ from breathe.finder import ItemFinder, stack -from breathe.renderer.filter import Filter +from breathe.parser.compound import compounddefTypeSub +from breathe.renderer.filter import Filter, FilterFactory +from breathe.parser import DoxygenCompoundParser + +from pprint import pprint + +from sphinx.application import Sphinx + +from typing import Any, List class DoxygenTypeSubItemFinder(ItemFinder): @@ -29,6 +37,12 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: class SectionDefTypeSubItemFinder(ItemFinder): + def __init__(self, app: Sphinx, compound_parser: 
DoxygenCompoundParser, *args): + super().__init__(*args) + + self.filter_factory = FilterFactory(app) + self.compound_parser = compound_parser + def filter_(self, ancestors, filter_: Filter, matches) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" @@ -40,6 +54,30 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: finder = self.item_finder_factory.create_finder(memberdef) finder.filter_(node_stack, filter_, matches) + # Descend to member children (Doxygen 1.9.7 or newer) + members = self.data_object.get_member() + # TODO: find a more precise type for the Doxygen nodes + member_matches: List[Any] = [] + for member in members: + member_finder = self.item_finder_factory.create_finder(member) + member_finder.filter_(node_stack, filter_, member_matches) + + # If there are members in this sectiondef that match the criteria + # then load up the file for the group they're in and get the member data objects + if member_matches: + matched_member_ids = (member.id for stack in matches for member in stack) + member_refid = member_matches[0][0].refid + filename = member_refid.rsplit('_', 1)[0] + file_data = self.compound_parser.parse(filename) + finder = self.item_finder_factory.create_finder(file_data) + for member_stack in member_matches: + member = member_stack[0] + if member.refid not in matched_member_ids: + ref_filter = self.filter_factory.create_id_filter( + "memberdef", member.refid + ) + finder.filter_(node_stack, ref_filter, matches) + class MemberDefTypeSubItemFinder(ItemFinder): def filter_(self, ancestors, filter_: Filter, matches) -> None: diff --git a/breathe/finder/factory.py b/breathe/finder/factory.py index 1440dae4..09fe6b64 100644 --- a/breathe/finder/factory.py +++ b/breathe/finder/factory.py @@ -20,6 +20,16 @@ def __call__(self, project_info: ProjectInfo, *args): return indexfinder.CompoundTypeSubItemFinder(self.app, compound_parser, project_info, *args) +class _CreateSectionDefTypeSubItemFinder: + def __init__(self, app: Sphinx, parser_factory: DoxygenParserFactory): + self.app = app + self.parser_factory = parser_factory + + def __call__(self, project_info: ProjectInfo, *args): + compound_parser = self.parser_factory.create_compound_parser(project_info) + return compoundfinder.SectionDefTypeSubItemFinder(self.app, compound_parser, project_info, *args) + + class DoxygenItemFinderFactory: def __init__(self, finders: Dict[str, Type[ItemFinder]], project_info: ProjectInfo): self.finders = finders @@ -65,7 +75,7 @@ def create_finder_from_root(self, root, project_info: ProjectInfo) -> Finder: "member": indexfinder.MemberTypeSubItemFinder, "doxygendef": compoundfinder.DoxygenTypeSubItemFinder, "compounddef": compoundfinder.CompoundDefTypeSubItemFinder, - "sectiondef": compoundfinder.SectionDefTypeSubItemFinder, + "sectiondef": _CreateSectionDefTypeSubItemFinder(self.app, self.parser_factory), # type: ignore "memberdef": compoundfinder.MemberDefTypeSubItemFinder, "ref": compoundfinder.RefTypeSubItemFinder, } diff --git a/breathe/finder/index.py b/breathe/finder/index.py index fef9f3e7..15009b80 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -47,22 +47,18 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: member_finder = self.item_finder_factory.create_finder(member) member_finder.filter_(node_stack, filter_, member_matches) + file_data = self.compound_parser.parse(self.data_object.refid) + finder = self.item_finder_factory.create_finder(file_data) + # If there are members in this compound 
that match the criteria # then load up the file for this compound and get the member data objects - if member_matches: - file_data = self.compound_parser.parse(self.data_object.refid) - finder = self.item_finder_factory.create_finder(file_data) - - for member_stack in member_matches: - ref_filter = self.filter_factory.create_id_filter( - "memberdef", member_stack[0].refid - ) - finder.filter_(node_stack, ref_filter, matches) - else: - # Read in the xml file referenced by the compound and descend into that as well - file_data = self.compound_parser.parse(self.data_object.refid) - finder = self.item_finder_factory.create_finder(file_data) - finder.filter_(node_stack, filter_, matches) + for member_stack in member_matches: + ref_filter = self.filter_factory.create_id_filter( + "memberdef", member_stack[0].refid + ) + finder.filter_(node_stack, ref_filter, matches) + # Read in the xml file referenced by the compound and descend into that as well + finder.filter_(node_stack, filter_, matches) class MemberTypeSubItemFinder(ItemFinder): diff --git a/breathe/parser/compound.py b/breathe/parser/compound.py index da6ee69b..6313c776 100644 --- a/breathe/parser/compound.py +++ b/breathe/parser/compound.py @@ -140,8 +140,8 @@ class sectiondefTypeSub(supermod.sectiondefType): node_type = "sectiondef" - def __init__(self, kind=None, header='', description=None, memberdef=None): - supermod.sectiondefType.__init__(self, kind, header, description, memberdef) + def __init__(self, kind=None, header='', description=None, memberdef=None, member=None): + supermod.sectiondefType.__init__(self, kind, header, description, memberdef, member) supermod.sectiondefType.subclass = sectiondefTypeSub @@ -237,6 +237,16 @@ def buildChildren(self, child_, nodeName_): # end class memberdefTypeSub +class MemberTypeSub(supermod.MemberType): + + node_type = "member" + + def __init__(self, kind=None, refid=None, name=''): + supermod.MemberType.__init__(self, kind, refid, name) +supermod.MemberType.subclass = MemberTypeSub +# end class MemberTypeSub + + class descriptionTypeSub(supermod.descriptionType): node_type = "description" diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 31f90ce3..2a56f550 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -735,7 +735,7 @@ def _create_description_filter( def _create_public_members_filter(self, options: Dict[str, Any]) -> Filter: node = Node() - node_is_memberdef = node.node_type == "memberdef" + node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") node_is_public = node.prot == "public" parent = Parent() @@ -771,9 +771,8 @@ def _create_non_public_members_filter( self, prot: str, option_name: str, options: Dict[str, Any] ) -> Filter: """'prot' is the doxygen xml term for 'public', 'protected' and 'private' categories.""" - node = Node() - node_is_memberdef = node.node_type == "memberdef" + node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") node_is_public = node.prot == prot parent = Parent() @@ -790,7 +789,7 @@ def _create_non_public_members_filter( def _create_undoc_members_filter(self, options: Dict[str, Any]) -> Filter: node = Node() - node_is_memberdef = node.node_type == "memberdef" + node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") node_has_description = ( node.briefdescription.has_content() | node.detaileddescription.has_content() @@ -940,7 +939,7 @@ def create_content_filter(self, kind: str, options: Dict[str, Any]) -> Filter: node = Node() 
# Filter for public memberdefs - node_is_memberdef = node.node_type == "memberdef" + node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") node_is_public = node.prot == "public" public_members = node_is_memberdef & node_is_public @@ -1022,12 +1021,13 @@ def create_member_finder_filter(self, namespace: str, name: str, kind: str) -> F self.app.config.breathe_implementation_filename_extensions ) parent_is_compound = parent.node_type == "compound" + parent_is_sectiondef = parent.node_type == "sectiondef" parent_is_file = (parent.kind == "file") & (~is_implementation_file) parent_is_not_file = parent.kind != "file" return (parent_is_compound & parent_is_file & node_matches) | ( - parent_is_compound & parent_is_not_file & node_matches - ) + parent_is_compound & parent_is_not_file & node_matches) | ( + parent_is_sectiondef & parent_is_not_file & node_matches) def create_function_and_all_friend_finder_filter(self, namespace: str, name: str) -> Filter: parent = Parent() From d0fea4bc71fde3e39adc0f4ab41b84f86eff934e Mon Sep 17 00:00:00 2001 From: Jasper Craeghs Date: Wed, 28 Jun 2023 11:30:29 +0200 Subject: [PATCH 05/65] Remove unused import 'pprint' --- breathe/finder/compound.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index ed3df361..c48cdbd1 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -3,8 +3,6 @@ from breathe.renderer.filter import Filter, FilterFactory from breathe.parser import DoxygenCompoundParser -from pprint import pprint - from sphinx.application import Sphinx from typing import Any, List From 542ae9b9ed6e5bd5e92ba2360b094df8931ba756 Mon Sep 17 00:00:00 2001 From: Michael Jones Date: Tue, 24 Oct 2023 09:23:51 +0100 Subject: [PATCH 06/65] Add github sponsorships to funding config --- .github/FUNDING.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 0709cc05..c287bfa6 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,6 +1,6 @@ # These are supported funding model platforms -github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +github: breathe-doc patreon: # Replace with a single Patreon username open_collective: breathe ko_fi: # Replace with a single Ko-fi username From d746a4b65883a5f0237ae9bf4b5cc4536ed73ec3 Mon Sep 17 00:00:00 2001 From: jce Date: Mon, 19 Jun 2023 17:02:37 +0200 Subject: [PATCH 07/65] Support member references in compound --- breathe/parser/compoundsuper.py | 52 ++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/breathe/parser/compoundsuper.py b/breathe/parser/compoundsuper.py index 79050edd..74c59524 100644 --- a/breathe/parser/compoundsuper.py +++ b/breathe/parser/compoundsuper.py @@ -513,6 +513,48 @@ def buildChildren(self, child_, nodeName_): # end class compounddefType +class MemberType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, refid=None, name=None): + self.kind = kind + self.refid = refid + self.name = name + def factory(*args_, **kwargs_): + if MemberType.subclass: + return MemberType.subclass(*args_, **kwargs_) + else: + return MemberType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid 
= refid + def hasContent_(self): + return self.name is not None + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + name_ = '' + for text__content_ in child_.childNodes: + name_ += text__content_.nodeValue + self.name = name_ +# end class MemberType + + class listofallmembersType(GeneratedsSuper): subclass = None superclass = None @@ -989,7 +1031,7 @@ def buildChildren(self, child_, nodeName_): class sectiondefType(GeneratedsSuper): subclass = None superclass = None - def __init__(self, kind=None, header=None, description=None, memberdef=None): + def __init__(self, kind=None, header=None, description=None, memberdef=None, member=None): self.kind = kind self.header = header self.description = description @@ -997,6 +1039,10 @@ def __init__(self, kind=None, header=None, description=None, memberdef=None): self.memberdef = [] else: self.memberdef = memberdef + if member is None: + self.member = [] + else: + self.member = member def factory(*args_, **kwargs_): if sectiondefType.subclass: return sectiondefType.subclass(*args_, **kwargs_) @@ -1011,6 +1057,10 @@ def get_memberdef(self): return self.memberdef def set_memberdef(self, memberdef): self.memberdef = memberdef def add_memberdef(self, value): self.memberdef.append(value) def insert_memberdef(self, index, value): self.memberdef[index] = value + def get_member(self): return self.member + def set_member(self, member): self.member = member + def add_member(self, value): self.member.append(value) + def insert_member(self, index, value): self.member[index] = value def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def hasContent_(self): From 2a9ed6bccbca7621b68a20fb6b81d7e15b7b5257 Mon Sep 17 00:00:00 2001 From: jce Date: Tue, 20 Jun 2023 13:20:39 +0200 Subject: [PATCH 08/65] Store member nodes --- breathe/parser/compoundsuper.py | 34 +++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/breathe/parser/compoundsuper.py b/breathe/parser/compoundsuper.py index 74c59524..ea62e2a8 100644 --- a/breathe/parser/compoundsuper.py +++ b/breathe/parser/compoundsuper.py @@ -1082,22 +1082,24 @@ def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'header': - header_ = '' - for text__content_ in child_.childNodes: - header_ += text__content_.nodeValue - self.header = header_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'description': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_description(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'memberdef': - obj_ = memberdefType.factory() - obj_.build(child_) - self.memberdef.append(obj_) + if child_.nodeType == Node.ELEMENT_NODE: + if nodeName_ == 'header': + header_ = '' + for text__content_ in child_.childNodes: + header_ += text__content_.nodeValue + self.header = header_ + elif nodeName_ == 'description': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_description(obj_) + elif nodeName_ == 
'memberdef': + obj_ = memberdefType.factory() + obj_.build(child_) + self.memberdef.append(obj_) + elif nodeName_ == 'member': + obj_ = MemberType.factory() + obj_.build(child_) + self.member.append(obj_) # end class sectiondefType From b1de593b6d09f123deaf60f6d9d655890a55b2cb Mon Sep 17 00:00:00 2001 From: jce Date: Wed, 21 Jun 2023 17:45:29 +0200 Subject: [PATCH 09/65] Support member references in compound --- breathe/finder/compound.py | 40 +++++++++++++++++++++++++++++++++++++- breathe/finder/factory.py | 12 +++++++++++- breathe/finder/index.py | 24 ++++++++++------------- breathe/parser/compound.py | 14 +++++++++++-- breathe/renderer/filter.py | 14 ++++++------- 5 files changed, 79 insertions(+), 25 deletions(-) diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index e38d2c6b..ed3df361 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -1,5 +1,13 @@ from breathe.finder import ItemFinder, stack -from breathe.renderer.filter import Filter +from breathe.parser.compound import compounddefTypeSub +from breathe.renderer.filter import Filter, FilterFactory +from breathe.parser import DoxygenCompoundParser + +from pprint import pprint + +from sphinx.application import Sphinx + +from typing import Any, List class DoxygenTypeSubItemFinder(ItemFinder): @@ -29,6 +37,12 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: class SectionDefTypeSubItemFinder(ItemFinder): + def __init__(self, app: Sphinx, compound_parser: DoxygenCompoundParser, *args): + super().__init__(*args) + + self.filter_factory = FilterFactory(app) + self.compound_parser = compound_parser + def filter_(self, ancestors, filter_: Filter, matches) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" @@ -40,6 +54,30 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: finder = self.item_finder_factory.create_finder(memberdef) finder.filter_(node_stack, filter_, matches) + # Descend to member children (Doxygen 1.9.7 or newer) + members = self.data_object.get_member() + # TODO: find a more precise type for the Doxygen nodes + member_matches: List[Any] = [] + for member in members: + member_finder = self.item_finder_factory.create_finder(member) + member_finder.filter_(node_stack, filter_, member_matches) + + # If there are members in this sectiondef that match the criteria + # then load up the file for the group they're in and get the member data objects + if member_matches: + matched_member_ids = (member.id for stack in matches for member in stack) + member_refid = member_matches[0][0].refid + filename = member_refid.rsplit('_', 1)[0] + file_data = self.compound_parser.parse(filename) + finder = self.item_finder_factory.create_finder(file_data) + for member_stack in member_matches: + member = member_stack[0] + if member.refid not in matched_member_ids: + ref_filter = self.filter_factory.create_id_filter( + "memberdef", member.refid + ) + finder.filter_(node_stack, ref_filter, matches) + class MemberDefTypeSubItemFinder(ItemFinder): def filter_(self, ancestors, filter_: Filter, matches) -> None: diff --git a/breathe/finder/factory.py b/breathe/finder/factory.py index 1440dae4..09fe6b64 100644 --- a/breathe/finder/factory.py +++ b/breathe/finder/factory.py @@ -20,6 +20,16 @@ def __call__(self, project_info: ProjectInfo, *args): return indexfinder.CompoundTypeSubItemFinder(self.app, compound_parser, project_info, *args) +class _CreateSectionDefTypeSubItemFinder: + def __init__(self, app: Sphinx, parser_factory: 
DoxygenParserFactory): + self.app = app + self.parser_factory = parser_factory + + def __call__(self, project_info: ProjectInfo, *args): + compound_parser = self.parser_factory.create_compound_parser(project_info) + return compoundfinder.SectionDefTypeSubItemFinder(self.app, compound_parser, project_info, *args) + + class DoxygenItemFinderFactory: def __init__(self, finders: Dict[str, Type[ItemFinder]], project_info: ProjectInfo): self.finders = finders @@ -65,7 +75,7 @@ def create_finder_from_root(self, root, project_info: ProjectInfo) -> Finder: "member": indexfinder.MemberTypeSubItemFinder, "doxygendef": compoundfinder.DoxygenTypeSubItemFinder, "compounddef": compoundfinder.CompoundDefTypeSubItemFinder, - "sectiondef": compoundfinder.SectionDefTypeSubItemFinder, + "sectiondef": _CreateSectionDefTypeSubItemFinder(self.app, self.parser_factory), # type: ignore "memberdef": compoundfinder.MemberDefTypeSubItemFinder, "ref": compoundfinder.RefTypeSubItemFinder, } diff --git a/breathe/finder/index.py b/breathe/finder/index.py index fef9f3e7..15009b80 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -47,22 +47,18 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: member_finder = self.item_finder_factory.create_finder(member) member_finder.filter_(node_stack, filter_, member_matches) + file_data = self.compound_parser.parse(self.data_object.refid) + finder = self.item_finder_factory.create_finder(file_data) + # If there are members in this compound that match the criteria # then load up the file for this compound and get the member data objects - if member_matches: - file_data = self.compound_parser.parse(self.data_object.refid) - finder = self.item_finder_factory.create_finder(file_data) - - for member_stack in member_matches: - ref_filter = self.filter_factory.create_id_filter( - "memberdef", member_stack[0].refid - ) - finder.filter_(node_stack, ref_filter, matches) - else: - # Read in the xml file referenced by the compound and descend into that as well - file_data = self.compound_parser.parse(self.data_object.refid) - finder = self.item_finder_factory.create_finder(file_data) - finder.filter_(node_stack, filter_, matches) + for member_stack in member_matches: + ref_filter = self.filter_factory.create_id_filter( + "memberdef", member_stack[0].refid + ) + finder.filter_(node_stack, ref_filter, matches) + # Read in the xml file referenced by the compound and descend into that as well + finder.filter_(node_stack, filter_, matches) class MemberTypeSubItemFinder(ItemFinder): diff --git a/breathe/parser/compound.py b/breathe/parser/compound.py index da6ee69b..6313c776 100644 --- a/breathe/parser/compound.py +++ b/breathe/parser/compound.py @@ -140,8 +140,8 @@ class sectiondefTypeSub(supermod.sectiondefType): node_type = "sectiondef" - def __init__(self, kind=None, header='', description=None, memberdef=None): - supermod.sectiondefType.__init__(self, kind, header, description, memberdef) + def __init__(self, kind=None, header='', description=None, memberdef=None, member=None): + supermod.sectiondefType.__init__(self, kind, header, description, memberdef, member) supermod.sectiondefType.subclass = sectiondefTypeSub @@ -237,6 +237,16 @@ def buildChildren(self, child_, nodeName_): # end class memberdefTypeSub +class MemberTypeSub(supermod.MemberType): + + node_type = "member" + + def __init__(self, kind=None, refid=None, name=''): + supermod.MemberType.__init__(self, kind, refid, name) +supermod.MemberType.subclass = MemberTypeSub +# end class MemberTypeSub + + 
class descriptionTypeSub(supermod.descriptionType): node_type = "description" diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 31f90ce3..2a56f550 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -735,7 +735,7 @@ def _create_description_filter( def _create_public_members_filter(self, options: Dict[str, Any]) -> Filter: node = Node() - node_is_memberdef = node.node_type == "memberdef" + node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") node_is_public = node.prot == "public" parent = Parent() @@ -771,9 +771,8 @@ def _create_non_public_members_filter( self, prot: str, option_name: str, options: Dict[str, Any] ) -> Filter: """'prot' is the doxygen xml term for 'public', 'protected' and 'private' categories.""" - node = Node() - node_is_memberdef = node.node_type == "memberdef" + node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") node_is_public = node.prot == prot parent = Parent() @@ -790,7 +789,7 @@ def _create_non_public_members_filter( def _create_undoc_members_filter(self, options: Dict[str, Any]) -> Filter: node = Node() - node_is_memberdef = node.node_type == "memberdef" + node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") node_has_description = ( node.briefdescription.has_content() | node.detaileddescription.has_content() @@ -940,7 +939,7 @@ def create_content_filter(self, kind: str, options: Dict[str, Any]) -> Filter: node = Node() # Filter for public memberdefs - node_is_memberdef = node.node_type == "memberdef" + node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") node_is_public = node.prot == "public" public_members = node_is_memberdef & node_is_public @@ -1022,12 +1021,13 @@ def create_member_finder_filter(self, namespace: str, name: str, kind: str) -> F self.app.config.breathe_implementation_filename_extensions ) parent_is_compound = parent.node_type == "compound" + parent_is_sectiondef = parent.node_type == "sectiondef" parent_is_file = (parent.kind == "file") & (~is_implementation_file) parent_is_not_file = parent.kind != "file" return (parent_is_compound & parent_is_file & node_matches) | ( - parent_is_compound & parent_is_not_file & node_matches - ) + parent_is_compound & parent_is_not_file & node_matches) | ( + parent_is_sectiondef & parent_is_not_file & node_matches) def create_function_and_all_friend_finder_filter(self, namespace: str, name: str) -> Filter: parent = Parent() From b6359af92fe9551201a319434bbe90e0d8635a9c Mon Sep 17 00:00:00 2001 From: Jasper Craeghs Date: Wed, 28 Jun 2023 11:30:29 +0200 Subject: [PATCH 10/65] Remove unused import 'pprint' --- breathe/finder/compound.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index ed3df361..c48cdbd1 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -3,8 +3,6 @@ from breathe.renderer.filter import Filter, FilterFactory from breathe.parser import DoxygenCompoundParser -from pprint import pprint - from sphinx.application import Sphinx from typing import Any, List From 06f37d8e2e65ad8a1f7b6970cd4b0a83b76d0ddd Mon Sep 17 00:00:00 2001 From: jce Date: Mon, 30 Oct 2023 14:16:20 +0100 Subject: [PATCH 11/65] Revert bad changes --- breathe/renderer/filter.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 2a56f550..4cb2c8e3 100644 --- a/breathe/renderer/filter.py +++ 
b/breathe/renderer/filter.py @@ -735,7 +735,7 @@ def _create_description_filter( def _create_public_members_filter(self, options: Dict[str, Any]) -> Filter: node = Node() - node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") + node_is_memberdef = node.node_type == "memberdef" node_is_public = node.prot == "public" parent = Parent() @@ -772,7 +772,7 @@ def _create_non_public_members_filter( ) -> Filter: """'prot' is the doxygen xml term for 'public', 'protected' and 'private' categories.""" node = Node() - node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") + node_is_memberdef = node.node_type == "memberdef" node_is_public = node.prot == prot parent = Parent() @@ -789,7 +789,7 @@ def _create_non_public_members_filter( def _create_undoc_members_filter(self, options: Dict[str, Any]) -> Filter: node = Node() - node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") + node_is_memberdef = node.node_type == "memberdef" node_has_description = ( node.briefdescription.has_content() | node.detaileddescription.has_content() @@ -939,7 +939,7 @@ def create_content_filter(self, kind: str, options: Dict[str, Any]) -> Filter: node = Node() # Filter for public memberdefs - node_is_memberdef = (node.node_type == "memberdef") | (node.node_type == "member") + node_is_memberdef = node.node_type == "memberdef" node_is_public = node.prot == "public" public_members = node_is_memberdef & node_is_public From 86b06e9a322be6e56af0a5c537c6dcdf12fdb3e5 Mon Sep 17 00:00:00 2001 From: jce Date: Mon, 30 Oct 2023 14:25:48 +0100 Subject: [PATCH 12/65] Revert unneeded change --- breathe/finder/index.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/breathe/finder/index.py b/breathe/finder/index.py index 15009b80..90ae5710 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -57,8 +57,9 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: "memberdef", member_stack[0].refid ) finder.filter_(node_stack, ref_filter, matches) - # Read in the xml file referenced by the compound and descend into that as well - finder.filter_(node_stack, filter_, matches) + if not member_matches: + # Read in the xml file referenced by the compound and descend into that as well + finder.filter_(node_stack, filter_, matches) class MemberTypeSubItemFinder(ItemFinder): From 4d89014ac74cf2b792adb1ef471115a6e0c0b86a Mon Sep 17 00:00:00 2001 From: jce Date: Mon, 30 Oct 2023 14:27:49 +0100 Subject: [PATCH 13/65] Minor refactoring to make diff in PR smaller --- breathe/finder/index.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/breathe/finder/index.py b/breathe/finder/index.py index 90ae5710..06eae6e3 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -52,12 +52,13 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: # If there are members in this compound that match the criteria # then load up the file for this compound and get the member data objects - for member_stack in member_matches: - ref_filter = self.filter_factory.create_id_filter( - "memberdef", member_stack[0].refid - ) - finder.filter_(node_stack, ref_filter, matches) - if not member_matches: + if member_matches: + for member_stack in member_matches: + ref_filter = self.filter_factory.create_id_filter( + "memberdef", member_stack[0].refid + ) + finder.filter_(node_stack, ref_filter, matches) + else: # Read in the xml file referenced by the compound and descend into that as well 
finder.filter_(node_stack, filter_, matches) From 349815838ac1c32ab57aef6d4dbccb0c7de2d6f7 Mon Sep 17 00:00:00 2001 From: jce Date: Mon, 30 Oct 2023 19:11:02 +0100 Subject: [PATCH 14/65] Use a matrix to test Doxygen 1.9.7 --- .github/workflows/documentation.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index e4ede026..ca662b24 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -5,6 +5,10 @@ jobs: runs-on: ubuntu-latest + strategy: + matrix: + doxygen_version: ["1.9.4", "1.9.7"] + steps: - uses: actions/checkout@v2 - uses: actions/cache@v1 @@ -24,14 +28,12 @@ jobs: sudo apt-get -y install graphviz libclang1-11 libclang-cpp11 - name: install doxygen from SF binary archives - env: - DOXYGEN_VERSION: 1.9.4 run: | mkdir doxygen-bin-arc && cd doxygen-bin-arc - curl -L https://sourceforge.net/projects/doxygen/files/rel-$DOXYGEN_VERSION/doxygen-$DOXYGEN_VERSION.linux.bin.tar.gz > doxygen.tar.gz + curl -L https://sourceforge.net/projects/doxygen/files/rel-${{ matrix.doxygen_version }}/doxygen-${{ matrix.doxygen_version }}.linux.bin.tar.gz > doxygen.tar.gz gunzip doxygen.tar.gz tar xf doxygen.tar - cd doxygen-$DOXYGEN_VERSION + cd doxygen-${{ matrix.doxygen_version }} sudo make install - name: build the documentation From 20f9a221e83a6d2444d886328d3925e8a9d3b3f6 Mon Sep 17 00:00:00 2001 From: jce Date: Wed, 1 Nov 2023 16:28:10 +0100 Subject: [PATCH 15/65] Always read in the xml file referenced by the compound --- breathe/finder/index.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/breathe/finder/index.py b/breathe/finder/index.py index 06eae6e3..f6129a5f 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -58,9 +58,9 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: "memberdef", member_stack[0].refid ) finder.filter_(node_stack, ref_filter, matches) - else: - # Read in the xml file referenced by the compound and descend into that as well - finder.filter_(node_stack, filter_, matches) + + # Read in the xml file referenced by the compound and descend into that as well + finder.filter_(node_stack, filter_, matches) class MemberTypeSubItemFinder(ItemFinder): From 5b23a92fd36abb7d3a742a29fdb5afee87806d4d Mon Sep 17 00:00:00 2001 From: jce Date: Wed, 1 Nov 2023 17:12:20 +0100 Subject: [PATCH 16/65] Test doxygenfunction directive with function in group --- documentation/source/conf.py | 1 + documentation/source/function.rst | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/documentation/source/conf.py b/documentation/source/conf.py index c1cf8c34..c84191ac 100644 --- a/documentation/source/conf.py +++ b/documentation/source/conf.py @@ -250,6 +250,7 @@ "membergroups": "../../examples/specific/membergroups/xml/", "simplesect": "../../examples/specific/simplesect/xml/", "dot_graphs": "../../examples/specific/dot_graphs/xml/", + "group_cpp": "../../examples/doxygen/group/xml/", } breathe_projects_source = {"auto": ("../../examples/specific", ["auto_function.h", "auto_class.h"])} diff --git a/documentation/source/function.rst b/documentation/source/function.rst index 4f2f1bf2..ffaa07fa 100644 --- a/documentation/source/function.rst +++ b/documentation/source/function.rst @@ -24,13 +24,13 @@ This should work: .. code-block:: rst - .. doxygenfunction:: open - :project: structcmd + .. doxygenfunction:: func2 + :project: group_cpp It produces this output: -.. 
doxygenfunction:: open - :project: structcmd +.. doxygenfunction:: func2 + :project: group_cpp Separated Declaration & Implementation Example ---------------------------------------------- From 1247172a4bc7e04b595864a0b05b78b23cc8a120 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Thu, 16 Nov 2023 06:21:55 -0500 Subject: [PATCH 17/65] Began incorporating C parser --- breathe/finder/__init__.py | 11 +- breathe/finder/factory.py | 15 +- breathe/finder/index.py | 4 +- breathe/{parser/__init__.py => parser.py} | 75 +- breathe/parser/compound.py | 1211 ---- breathe/parser/compoundsuper.py | 6056 --------------------- breathe/parser/index.py | 60 - breathe/parser/indexsuper.py | 359 -- breathe/renderer/sphinxrenderer.py | 4 +- pyproject.toml | 3 + requirements/development.txt | 4 +- setup.cfg | 2 +- setup.py | 83 +- tests/test_renderer.py | 81 +- xml_parser_generator/CMakeLists.txt | 38 + xml_parser_generator/make_parser.py | 644 +++ xml_parser_generator/module_template.c | 1993 +++++++ xml_parser_generator/schema.json | 1431 +++++ xml_parser_generator/stubs_template.pyi | 124 + 19 files changed, 4403 insertions(+), 7795 deletions(-) rename breathe/{parser/__init__.py => parser.py} (53%) delete mode 100644 breathe/parser/compound.py delete mode 100644 breathe/parser/compoundsuper.py delete mode 100644 breathe/parser/index.py delete mode 100644 breathe/parser/indexsuper.py create mode 100644 xml_parser_generator/CMakeLists.txt create mode 100644 xml_parser_generator/make_parser.py create mode 100644 xml_parser_generator/module_template.c create mode 100644 xml_parser_generator/schema.json create mode 100644 xml_parser_generator/stubs_template.pyi diff --git a/breathe/finder/__init__.py b/breathe/finder/__init__.py index 4217ce5f..65bacc21 100644 --- a/breathe/finder/__init__.py +++ b/breathe/finder/__init__.py @@ -1,6 +1,12 @@ +from __future__ import annotations + from breathe.project import ProjectInfo from breathe.renderer.filter import Filter +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from breathe.finder.factory import DoxygenItemFinderFactory def stack(element, list_): """Stack an element on to the start of a list and return as a new list""" @@ -12,10 +18,9 @@ def stack(element, list_): class ItemFinder: - def __init__(self, project_info: ProjectInfo, data_object, item_finder_factory): + def __init__(self, project_info: ProjectInfo, data_object, item_finder_factory: DoxygenItemFinderFactory): self.data_object = data_object - # DoxygenItemFinderFactory, but actually typing it would introduce an import cycle - self.item_finder_factory = item_finder_factory + self.item_finder_factory: DoxygenItemFinderFactory = item_finder_factory self.project_info = project_info def filter_(self, ancestors, filter_: Filter, matches) -> None: diff --git a/breathe/finder/factory.py b/breathe/finder/factory.py index 1440dae4..23b04137 100644 --- a/breathe/finder/factory.py +++ b/breathe/finder/factory.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from breathe.finder import ItemFinder from breathe.finder import index as indexfinder from breathe.finder import compound as compoundfinder @@ -7,7 +9,10 @@ from sphinx.application import Sphinx -from typing import Dict, Type +from typing import Any, Callable, TYPE_CHECKING + +if TYPE_CHECKING: + ItemFinderCreator = Callable[[ProjectInfo,Any,'DoxygenItemFinderFactory'],ItemFinder] class _CreateCompoundTypeSubFinder: @@ -15,13 +20,13 @@ def __init__(self, app: Sphinx, parser_factory: DoxygenParserFactory): self.app = app 
self.parser_factory = parser_factory - def __call__(self, project_info: ProjectInfo, *args): + def __call__(self, project_info: ProjectInfo, *args) -> indexfinder.CompoundTypeSubItemFinder: compound_parser = self.parser_factory.create_compound_parser(project_info) return indexfinder.CompoundTypeSubItemFinder(self.app, compound_parser, project_info, *args) class DoxygenItemFinderFactory: - def __init__(self, finders: Dict[str, Type[ItemFinder]], project_info: ProjectInfo): + def __init__(self, finders: dict[str, ItemFinderCreator], project_info: ProjectInfo): self.finders = finders self.project_info = project_info @@ -59,9 +64,9 @@ def create_finder(self, project_info: ProjectInfo) -> Finder: return self.create_finder_from_root(root, project_info) def create_finder_from_root(self, root, project_info: ProjectInfo) -> Finder: - finders: Dict[str, Type[ItemFinder]] = { + finders: dict[str, ItemFinderCreator] = { "doxygen": indexfinder.DoxygenTypeSubItemFinder, - "compound": _CreateCompoundTypeSubFinder(self.app, self.parser_factory), # type: ignore + "compound": _CreateCompoundTypeSubFinder(self.app, self.parser_factory), "member": indexfinder.MemberTypeSubItemFinder, "doxygendef": compoundfinder.DoxygenTypeSubItemFinder, "compounddef": compoundfinder.CompoundDefTypeSubItemFinder, diff --git a/breathe/finder/index.py b/breathe/finder/index.py index fef9f3e7..0a142289 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -4,7 +4,7 @@ from sphinx.application import Sphinx -from typing import Any, List +from typing import Any class DoxygenTypeSubItemFinder(ItemFinder): @@ -42,7 +42,7 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: # Descend to member children members = self.data_object.get_member() # TODO: find a more precise type for the Doxygen nodes - member_matches: List[Any] = [] + member_matches: list[Any] = [] for member in members: member_finder = self.item_finder_factory.create_finder(member) member_finder.filter_(node_stack, filter_, member_matches) diff --git a/breathe/parser/__init__.py b/breathe/parser.py similarity index 53% rename from breathe/parser/__init__.py rename to breathe/parser.py index 9a656cce..cccc6cb2 100644 --- a/breathe/parser/__init__.py +++ b/breathe/parser.py @@ -1,25 +1,26 @@ -from . import index -from . 
import compound - from breathe import file_state_cache, path_handler from breathe.project import ProjectInfo +from breathe._parser import * + from sphinx.application import Sphinx -class ParserError(Exception): - def __init__(self, error: Exception, filename: str): +class ParserError(RuntimeError): + def __init__(self, error: str, filename: str): super().__init__(error) self.error = error self.filename = filename def __str__(self): - return ("file %s: %s" % (self.filename, self.error)) + # TODO: update _parser.ParseError to store the line number and message + # as separate fields for better formatting here + return f"file {self.filename}: {self.error}" -class FileIOError(Exception): - def __init__(self, error: Exception, filename: str): +class FileIOError(RuntimeError): + def __init__(self, error: str, filename: str): super().__init__(error) self.error = error @@ -30,26 +31,34 @@ class Parser: def __init__(self, app: Sphinx, cache): self.app = app self.cache = cache - - -class DoxygenIndexParser(Parser): - def parse(self, project_info: ProjectInfo): - filename = path_handler.resolve_path(self.app, project_info.project_path(), "index.xml") - file_state_cache.update(self.app, filename) - + + def _parse_common(self,filename: str, right_tag: str) -> Node_DoxygenTypeIndex | Node_DoxygenType: try: # Try to get from our cache return self.cache[filename] except KeyError: # If that fails, parse it afresh try: - result = index.parse(filename) + with open(filename,'rb') as file: + result = parse_file(file) + if result.name != right_tag: + raise ParserError(f'expected "{right_tag}" root element, not "{result.name}"',filename) self.cache[filename] = result - return result - except index.ParseError as e: - raise ParserError(e, filename) - except index.FileIOError as e: - raise FileIOError(e, filename) + return result.value + except ParseError as e: + raise ParserError(str(e), filename) + except IOError as e: + raise FileIOError(str(e), filename) + + +class DoxygenIndexParser(Parser): + def parse(self, project_info: ProjectInfo) -> Node_DoxygenTypeIndex: + filename = path_handler.resolve_path(self.app, project_info.project_path(), "index.xml") + file_state_cache.update(self.app, filename) + + r = self._parse_common(filename, 'doxygenindex') + assert isinstance(r,Node_DoxygenTypeIndex) + return r class DoxygenCompoundParser(Parser): @@ -59,36 +68,24 @@ def __init__(self, app: Sphinx, cache, self.project_info = project_info - def parse(self, refid: str): + def parse(self, refid: str) -> Node_DoxygenType: filename = path_handler.resolve_path( self.app, self.project_info.project_path(), - "%s.xml" % refid + f"{refid}.xml" ) file_state_cache.update(self.app, filename) - try: - # Try to get from our cache - return self.cache[filename] - except KeyError: - # If that fails, parse it afresh - try: - result = compound.parse(filename) - self.cache[filename] = result - return result - except compound.ParseError as e: - raise ParserError(e, filename) - except compound.FileIOError as e: - raise FileIOError(e, filename) + r = self._parse_common(filename, 'doxygen') + assert isinstance(r,Node_DoxygenType) + return r class DoxygenParserFactory: def __init__(self, app: Sphinx) -> None: self.app = app - # TODO: do we have a base class for all the Doxygen XML node types - # that we can use for typing? 
- self.cache = {} # type: ignore + self.cache: dict[str, Node_DoxygenType | Node_DoxygenTypeIndex] = {} def create_index_parser(self) -> DoxygenIndexParser: return DoxygenIndexParser(self.app, self.cache) diff --git a/breathe/parser/compound.py b/breathe/parser/compound.py deleted file mode 100644 index da6ee69b..00000000 --- a/breathe/parser/compound.py +++ /dev/null @@ -1,1211 +0,0 @@ -""" -Generated Mon Feb 9 19:08:05 2009 by generateDS.py. -This file contains manual modifications. -""" - -from xml.dom import minidom -from xml.dom import Node -from xml.parsers.expat import ExpatError - -from . import compoundsuper as supermod -from .compoundsuper import MixedContainer - - -class DoxygenTypeSub(supermod.DoxygenType): - - node_type = "doxygendef" - - def __init__(self, version=None, compounddef=None): - supermod.DoxygenType.__init__(self, version, compounddef) - - -supermod.DoxygenType.subclass = DoxygenTypeSub -# end class DoxygenTypeSub - - -class compounddefTypeSub(supermod.compounddefType): - - node_type = "compounddef" - - def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', - basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, - incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, - innerclass=None, innernamespace=None, innerpage=None, innergroup=None, - templateparamlist=None, sectiondef=None, briefdescription=None, - detaileddescription=None, inheritancegraph=None, collaborationgraph=None, - programlisting=None, location=None, listofallmembers=None, language=None): - - supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, - basecompoundref, derivedcompoundref, includes, includedby, - incdepgraph, invincdepgraph, innerdir, innerfile, - innerclass, innernamespace, innerpage, innergroup, - templateparamlist, sectiondef, briefdescription, - detaileddescription, inheritancegraph, collaborationgraph, - programlisting, location, listofallmembers, language) - - -supermod.compounddefType.subclass = compounddefTypeSub -# end class compounddefTypeSub - - -class listofallmembersTypeSub(supermod.listofallmembersType): - - node_type = "listofallmembers" - - def __init__(self, member=None): - supermod.listofallmembersType.__init__(self, member) - - -supermod.listofallmembersType.subclass = listofallmembersTypeSub -# end class listofallmembersTypeSub - - -class memberRefTypeSub(supermod.memberRefType): - - node_type = "memberref" - - def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''): - supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name) - - -supermod.memberRefType.subclass = memberRefTypeSub -# end class memberRefTypeSub - - -class compoundRefTypeSub(supermod.compoundRefType): - - node_type = "compoundref" - - def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, - content_=None): - supermod.compoundRefType.__init__(self, mixedclass_, content_) - - -supermod.compoundRefType.subclass = compoundRefTypeSub -# end class compoundRefTypeSub - - -class reimplementTypeSub(supermod.reimplementType): - - node_type = "reimplement" - - def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): - supermod.reimplementType.__init__(self, mixedclass_, content_) - - -supermod.reimplementType.subclass = reimplementTypeSub -# end class reimplementTypeSub - - -class incTypeSub(supermod.incType): - - node_type = "inc" - - def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, 
content_=None): - supermod.incType.__init__(self, mixedclass_, content_) - - -supermod.incType.subclass = incTypeSub -# end class incTypeSub - - -class refTypeSub(supermod.refType): - - node_type = "ref" - - def __init__(self, node_name, prot=None, refid=None, valueOf_='', mixedclass_=None, - content_=None): - supermod.refType.__init__(self, mixedclass_, content_) - - self.node_name = node_name - - -supermod.refType.subclass = refTypeSub - - -class refTextTypeSub(supermod.refTextType): - - node_type = "reftex" - - def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, - content_=None): - supermod.refTextType.__init__(self, mixedclass_, content_) - - -supermod.refTextType.subclass = refTextTypeSub -# end class refTextTypeSub - - -class sectiondefTypeSub(supermod.sectiondefType): - - node_type = "sectiondef" - - def __init__(self, kind=None, header='', description=None, memberdef=None): - supermod.sectiondefType.__init__(self, kind, header, description, memberdef) - - -supermod.sectiondefType.subclass = sectiondefTypeSub -# end class sectiondefTypeSub - - -class memberdefTypeSub(supermod.memberdefType): - - node_type = "memberdef" - - def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, - readable=None, prot=None, explicit=None, new=None, final=None, writable=None, - add=None, static=None, strong=None, remove=None, sealed=None, mutable=None, - gettable=None, inline=None, settable=None, id=None, templateparamlist=None, - type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', - reimplements=None, reimplementedby=None, param=None, enumvalue=None, - initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, - inbodydescription=None, location=None, references=None, referencedby=None, - refqual=None): - - supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, - readable, prot, explicit, new, final, writable, add, static, - strong, remove, sealed, mutable, gettable, inline, settable, - id, templateparamlist, type_, definition, argsstring, name, - read, write, bitfield, reimplements, reimplementedby, param, - enumvalue, initializer, exceptions, briefdescription, - detaileddescription, inbodydescription, location, - references, referencedby, refqual) - - self.parameterlist = supermod.docParamListType.factory() - self.parameterlist.kind = "param" - - def buildChildren(self, child_, nodeName_): - supermod.memberdefType.buildChildren(self, child_, nodeName_) - - if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'param': - - # Get latest param - param = self.param[-1] - - # If it doesn't have a description we're done - if not param.briefdescription: - return - - # Construct our own param list from the descriptions stored inline - # with the parameters - paramdescription = param.briefdescription - paramname = supermod.docParamName.factory() - - # Add parameter name - obj_ = paramname.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', - param.declname) - paramname.content_.append(obj_) - - paramnamelist = supermod.docParamNameList.factory() - paramnamelist.parametername.append(paramname) - - paramlistitem = supermod.docParamListItem.factory() - paramlistitem.parameternamelist.append(paramnamelist) - - # Add parameter description - paramlistitem.parameterdescription = paramdescription - - self.parameterlist.parameteritem.append(paramlistitem) - - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 
'detaileddescription': - - if not self.parameterlist.parameteritem: - # No items in our list - return - - # Assume supermod.memberdefType.buildChildren has already built the - # description object, we just want to slot our parameterlist in at - # a reasonable point - - if not self.detaileddescription: - # Create one if it doesn't exist - self.detaileddescription = supermod.descriptionType.factory() - - detaileddescription = self.detaileddescription - - para = supermod.docParaType.factory() - para.parameterlist.append(self.parameterlist) - - obj_ = detaileddescription.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', para) - - index = 0 - detaileddescription.content_.insert(index, obj_) - - -supermod.memberdefType.subclass = memberdefTypeSub -# end class memberdefTypeSub - - -class descriptionTypeSub(supermod.descriptionType): - - node_type = "description" - - def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, - content_=None): - supermod.descriptionType.__init__(self, mixedclass_, content_) - - -supermod.descriptionType.subclass = descriptionTypeSub -# end class descriptionTypeSub - - -class enumvalueTypeSub(supermod.enumvalueType): - - node_type = "enumvalue" - - def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, - detaileddescription=None, mixedclass_=None, content_=None): - supermod.enumvalueType.__init__(self, mixedclass_, content_) - - self.initializer = None - - def buildChildren(self, child_, nodeName_): - # Get text from child and put it in self.name - if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'name': - value_ = [] - for text_ in child_.childNodes: - value_.append(text_.nodeValue) - valuestr_ = ''.join(value_) - self.name = valuestr_ - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'briefdescription': - obj_ = supermod.descriptionType.factory() - obj_.build(child_) - self.set_briefdescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'detaileddescription': - obj_ = supermod.descriptionType.factory() - obj_.build(child_) - self.set_detaileddescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'initializer': - childobj_ = supermod.linkedTextType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, - 'initializer', childobj_) - self.set_initializer(obj_) - self.content_.append(obj_) - - -supermod.enumvalueType.subclass = enumvalueTypeSub -# end class enumvalueTypeSub - - -class templateparamlistTypeSub(supermod.templateparamlistType): - - node_type = "templateparamlist" - - def __init__(self, param=None): - supermod.templateparamlistType.__init__(self, param) - - -supermod.templateparamlistType.subclass = templateparamlistTypeSub -# end class templateparamlistTypeSub - - -class paramTypeSub(supermod.paramType): - - node_type = "param" - - def __init__(self, type_=None, declname='', defname='', array='', defval=None, - briefdescription=None): - supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription) - - -supermod.paramType.subclass = paramTypeSub -# end class paramTypeSub - - -class linkedTextTypeSub(supermod.linkedTextType): - - node_type = "linkedtext" - - def __init__(self, ref=None, mixedclass_=None, content_=None): - supermod.linkedTextType.__init__(self, mixedclass_, content_) - - -supermod.linkedTextType.subclass = linkedTextTypeSub -# end class linkedTextTypeSub - - -class 
graphTypeSub(supermod.graphType): - - node_type = "graph" - - def __init__(self, node=None, direction="forward", caption=""): - supermod.graphType.__init__(self, node, direction, caption) - - -supermod.graphType.subclass = graphTypeSub -# end class graphTypeSub - - -class nodeTypeSub(supermod.nodeType): - - node_type = "node" - - def __init__(self, id=None, label='', link=None, childnode=None): - supermod.nodeType.__init__(self, id, label, link, childnode) - - -supermod.nodeType.subclass = nodeTypeSub -# end class nodeTypeSub - - -class childnodeTypeSub(supermod.childnodeType): - - node_type = "childnode" - - def __init__(self, relation=None, refid=None, edgelabel=None): - supermod.childnodeType.__init__(self, relation, refid, edgelabel) - - -supermod.childnodeType.subclass = childnodeTypeSub -# end class childnodeTypeSub - - -class linkTypeSub(supermod.linkType): - - node_type = "link" - - def __init__(self, refid=None, external=None, valueOf_=''): - supermod.linkType.__init__(self, refid, external) - - -supermod.linkType.subclass = linkTypeSub -# end class linkTypeSub - - -class listingTypeSub(supermod.listingType): - - node_type = "listing" - - def __init__(self, codeline=None, domain=None): - supermod.listingType.__init__(self, codeline, domain) - - -supermod.listingType.subclass = listingTypeSub -# end class listingTypeSub - - -class codelineTypeSub(supermod.codelineType): - - node_type = "codeline" - - def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): - supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight) - - -supermod.codelineType.subclass = codelineTypeSub -# end class codelineTypeSub - - -class highlightTypeSub(supermod.highlightType): - - node_type = "highlight" - - def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None): - supermod.highlightType.__init__(self, mixedclass_, content_) - - -supermod.highlightType.subclass = highlightTypeSub -# end class highlightTypeSub - - -class referenceTypeSub(supermod.referenceType): - - node_type = "reference" - - def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', - mixedclass_=None, content_=None): - supermod.referenceType.__init__(self, mixedclass_, content_) - - -supermod.referenceType.subclass = referenceTypeSub -# end class referenceTypeSub - - -class locationTypeSub(supermod.locationType): - - node_type = "location" - - def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, - valueOf_=''): - supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file) - - -supermod.locationType.subclass = locationTypeSub -# end class locationTypeSub - - -class docSect1TypeSub(supermod.docSect1Type): - - node_type = "docsect1" - - def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, - content_=None): - supermod.docSect1Type.__init__(self, mixedclass_, content_) - - -supermod.docSect1Type.subclass = docSect1TypeSub -# end class docSect1TypeSub - - -class docSect2TypeSub(supermod.docSect2Type): - - node_type = "docsect2" - - def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, - content_=None): - supermod.docSect2Type.__init__(self, mixedclass_, content_) - - -supermod.docSect2Type.subclass = docSect2TypeSub -# end class docSect2TypeSub - - -class docSect3TypeSub(supermod.docSect3Type): - - node_type = "docsect3" - - def __init__(self, id=None, title='', para=None, sect4=None, internal=None, 
mixedclass_=None, - content_=None): - supermod.docSect3Type.__init__(self, mixedclass_, content_) - - -supermod.docSect3Type.subclass = docSect3TypeSub -# end class docSect3TypeSub - - -class docSect4TypeSub(supermod.docSect4Type): - - node_type = "docsect4" - - def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, - content_=None): - supermod.docSect4Type.__init__(self, mixedclass_, content_) - - -supermod.docSect4Type.subclass = docSect4TypeSub -# end class docSect4TypeSub - - -class docInternalTypeSub(supermod.docInternalType): - - node_type = "docinternal" - - def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): - supermod.docInternalType.__init__(self, mixedclass_, content_) - - -supermod.docInternalType.subclass = docInternalTypeSub -# end class docInternalTypeSub - - -class docInternalS1TypeSub(supermod.docInternalS1Type): - - node_type = "docinternals1" - - def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): - supermod.docInternalS1Type.__init__(self, mixedclass_, content_) - - -supermod.docInternalS1Type.subclass = docInternalS1TypeSub -# end class docInternalS1TypeSub - - -class docInternalS2TypeSub(supermod.docInternalS2Type): - - node_type = "docinternals2" - - def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): - supermod.docInternalS2Type.__init__(self, mixedclass_, content_) - - -supermod.docInternalS2Type.subclass = docInternalS2TypeSub -# end class docInternalS2TypeSub - - -class docInternalS3TypeSub(supermod.docInternalS3Type): - - node_type = "docinternals3" - - def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): - supermod.docInternalS3Type.__init__(self, mixedclass_, content_) - - -supermod.docInternalS3Type.subclass = docInternalS3TypeSub -# end class docInternalS3TypeSub - - -class docInternalS4TypeSub(supermod.docInternalS4Type): - - node_type = "docinternals4" - - def __init__(self, para=None, mixedclass_=None, content_=None): - supermod.docInternalS4Type.__init__(self, mixedclass_, content_) - - -supermod.docInternalS4Type.subclass = docInternalS4TypeSub -# end class docInternalS4TypeSub - - -class docURLLinkSub(supermod.docURLLink): - - node_type = "docurllink" - - def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docURLLink.__init__(self, mixedclass_, content_) - - -supermod.docURLLink.subclass = docURLLinkSub -# end class docURLLinkSub - - -class docAnchorTypeSub(supermod.docAnchorType): - - node_type = "docanchor" - - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docAnchorType.__init__(self, mixedclass_, content_) - - -supermod.docAnchorType.subclass = docAnchorTypeSub -# end class docAnchorTypeSub - - -class docFormulaTypeSub(supermod.docFormulaType): - - node_type = "docformula" - - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docFormulaType.__init__(self, mixedclass_, content_) - - -supermod.docFormulaType.subclass = docFormulaTypeSub -# end class docFormulaTypeSub - - -class docIndexEntryTypeSub(supermod.docIndexEntryType): - - node_type = "docindexentry" - - def __init__(self, primaryie='', secondaryie=''): - supermod.docIndexEntryType.__init__(self, primaryie, secondaryie) - - -supermod.docIndexEntryType.subclass = docIndexEntryTypeSub -# end class docIndexEntryTypeSub - - -class docListTypeSub(supermod.docListType): - - node_type = "doclist" - - def __init__(self, listitem=None, subtype=""): - self.node_subtype = 
"itemized" - if subtype != "": - self.node_subtype = subtype - supermod.docListType.__init__(self, listitem) - - -supermod.docListType.subclass = docListTypeSub -# end class docListTypeSub - - -class docListItemTypeSub(supermod.docListItemType): - - node_type = "doclistitem" - - def __init__(self, para=None): - supermod.docListItemType.__init__(self, para) - - -supermod.docListItemType.subclass = docListItemTypeSub -# end class docListItemTypeSub - - -class docSimpleSectTypeSub(supermod.docSimpleSectType): - - node_type = "docsimplesect" - - def __init__(self, kind=None, title=None, para=None): - supermod.docSimpleSectType.__init__(self, kind, title, para) - - -supermod.docSimpleSectType.subclass = docSimpleSectTypeSub -# end class docSimpleSectTypeSub - - -class docVarListEntryTypeSub(supermod.docVarListEntryType): - - node_type = "docvarlistentry" - - def __init__(self, term=None): - supermod.docVarListEntryType.__init__(self, term) - - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'term': - obj_ = supermod.docTitleType.factory() - obj_.build(child_) - self.set_term(obj_) - - -supermod.docVarListEntryType.subclass = docVarListEntryTypeSub -# end class docVarListEntryTypeSub - - -class docRefTextTypeSub(supermod.docRefTextType): - - node_type = "docreftext" - - def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, - content_=None): - supermod.docRefTextType.__init__(self, mixedclass_, content_) - - self.para = [] - - def buildChildren(self, child_, nodeName_): - supermod.docRefTextType.buildChildren(self, child_, nodeName_) - - if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'para': - obj_ = supermod.docParaType.factory() - obj_.build(child_) - self.para.append(obj_) - - -supermod.docRefTextType.subclass = docRefTextTypeSub -# end class docRefTextTypeSub - - -class docTableTypeSub(supermod.docTableType): - - node_type = "doctable" - - def __init__(self, rows=None, cols=None, row=None, caption=None): - supermod.docTableType.__init__(self, rows, cols, row, caption) - - -supermod.docTableType.subclass = docTableTypeSub -# end class docTableTypeSub - - -class docRowTypeSub(supermod.docRowType): - - node_type = "docrow" - - def __init__(self, entry=None): - supermod.docRowType.__init__(self, entry) - - -supermod.docRowType.subclass = docRowTypeSub -# end class docRowTypeSub - - -class docEntryTypeSub(supermod.docEntryType): - - node_type = "docentry" - - def __init__(self, thead=None, align=None, rowspan=None, colspan=None, para=None): - supermod.docEntryType.__init__(self, thead, align, rowspan, colspan, para) - - -supermod.docEntryType.subclass = docEntryTypeSub -# end class docEntryTypeSub - - -class docHeadingTypeSub(supermod.docHeadingType): - - node_type = "docheading" - - def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docHeadingType.__init__(self, mixedclass_, content_) - - def buildChildren(self, child_, nodeName_): - supermod.docHeadingType.buildChildren(self, child_, nodeName_) - - # Account for styled content in the heading. This might need to be expanded to include other - # nodes as it seems from the xsd that headings can have a lot of different children but we - # really don't expect most of them to come up. 
- if child_.nodeType == Node.ELEMENT_NODE and ( - nodeName_ == 'bold' or - nodeName_ == 'emphasis' or - nodeName_ == 'computeroutput' or - nodeName_ == 'subscript' or - nodeName_ == 'superscript' or - nodeName_ == 'center' or - nodeName_ == 'small'): - obj_ = supermod.docMarkupType.factory() - obj_.build(child_) - obj_.type_ = nodeName_ - self.content_.append(obj_) - - -supermod.docHeadingType.subclass = docHeadingTypeSub -# end class docHeadingTypeSub - - -class docImageTypeSub(supermod.docImageType): - - node_type = "docimage" - - def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', - mixedclass_=None, content_=None): - supermod.docImageType.__init__(self, mixedclass_, content_) - - -supermod.docImageType.subclass = docImageTypeSub -# end class docImageTypeSub - - -class docDotFileTypeSub(supermod.docDotFileType): - - node_type = "docdotfile" - - def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docDotFileType.__init__(self, mixedclass_, content_) - - -supermod.docDotFileType.subclass = docDotFileTypeSub -# end class docDotFileTypeSub - - -class docDotTypeSub(supermod.docDotType): - - node_type = "docdot" - - def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docDotType.__init__(self, mixedclass_, content_) - - -supermod.docDotType.subclass = docDotTypeSub -# end class docDotTypeSub - - -class docTocItemTypeSub(supermod.docTocItemType): - - node_type = "doctocitem" - - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docTocItemType.__init__(self, mixedclass_, content_) - - -supermod.docTocItemType.subclass = docTocItemTypeSub -# end class docTocItemTypeSub - - -class docTocListTypeSub(supermod.docTocListType): - - node_type = "doctoclist" - - def __init__(self, tocitem=None): - supermod.docTocListType.__init__(self, tocitem) - - -supermod.docTocListType.subclass = docTocListTypeSub -# end class docTocListTypeSub - - -class docLanguageTypeSub(supermod.docLanguageType): - - node_type = "doclanguage" - - def __init__(self, langid=None, para=None): - supermod.docLanguageType.__init__(self, langid, para) - - -supermod.docLanguageType.subclass = docLanguageTypeSub -# end class docLanguageTypeSub - - -class docParamListTypeSub(supermod.docParamListType): - - node_type = "docparamlist" - - def __init__(self, kind=None, parameteritem=None): - supermod.docParamListType.__init__(self, kind, parameteritem) - - -supermod.docParamListType.subclass = docParamListTypeSub -# end class docParamListTypeSub - - -class docParamListItemSub(supermod.docParamListItem): - - node_type = "docparamlistitem" - - def __init__(self, parameternamelist=None, parameterdescription=None): - supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription) - - -supermod.docParamListItem.subclass = docParamListItemSub -# end class docParamListItemSub - - -class docParamNameListSub(supermod.docParamNameList): - - node_type = "docparamnamelist" - - def __init__(self, parametername=None): - supermod.docParamNameList.__init__(self, parametername) - - -supermod.docParamNameList.subclass = docParamNameListSub -# end class docParamNameListSub - - -class docParamNameSub(supermod.docParamName): - - node_type = "docparamname" - - def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): - supermod.docParamName.__init__(self, mixedclass_, content_) - - -supermod.docParamName.subclass = docParamNameSub -# end class docParamNameSub - - -class 
docXRefSectTypeSub(supermod.docXRefSectType): - - node_type = "docxrefsect" - - def __init__(self, id=None, xreftitle=None, xrefdescription=None): - supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription) - - -supermod.docXRefSectType.subclass = docXRefSectTypeSub -# end class docXRefSectTypeSub - - -class docVariableListTypeSub(supermod.docVariableListType): - - node_type = "docvariablelist" - - def __init__(self, valueOf_=''): - supermod.docVariableListType.__init__(self, valueOf_) - - self.varlistentries = [] - self.listitems = [] - - def buildChildren(self, child_, nodeName_): - supermod.docVariableListType.buildChildren(self, child_, nodeName_) - - if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "varlistentry": - obj_ = supermod.docVarListEntryType.factory() - obj_.build(child_) - self.varlistentries.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "listitem": - obj_ = supermod.docListItemType.factory() - obj_.build(child_) - self.listitems.append(obj_) - - -supermod.docVariableListType.subclass = docVariableListTypeSub -# end class docVariableListTypeSub - - -class docCopyTypeSub(supermod.docCopyType): - - node_type = "doccopy" - - def __init__(self, link=None, para=None, sect1=None, internal=None): - supermod.docCopyType.__init__(self, link, para, sect1, internal) - - -supermod.docCopyType.subclass = docCopyTypeSub -# end class docCopyTypeSub - - -class docCharTypeSub(supermod.docCharType): - - node_type = "docchar" - - def __init__(self, char=None, valueOf_=''): - supermod.docCharType.__init__(self, char) - - -supermod.docCharType.subclass = docCharTypeSub -# end class docCharTypeSub - - -class verbatimTypeSub(object): - """ - New node type. Structure is largely pillaged from other nodes in order to - match the set. 
- """ - - node_type = "verbatim" - - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - self.text = "" - - def factory(*args, **kwargs): - return verbatimTypeSub(*args, **kwargs) - - factory = staticmethod(factory) - - def buildAttributes(self, attrs): - pass - - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.text += child_.nodeValue - - -class docBlockQuoteTypeSub(supermod.docBlockQuoteType): - - node_type = "docblockquote" - - def __init__(self, para=None): - supermod.docBlockQuoteType.__init__(self, para) - - def buildChildren(self, child_, nodeName_): - supermod.docBlockQuoteType.buildChildren(self, child_, nodeName_) - - -supermod.docBlockQuoteType.subclass = docBlockQuoteTypeSub -# end class docBlockQuoteTypeSub - - -class docParaTypeSub(supermod.docParaType): - - node_type = "docpara" - - def __init__(self, char=None, valueOf_=''): - supermod.docParaType.__init__(self, char) - - self.parameterlist = [] - self.simplesects = [] - self.content = [] - self.programlisting = [] - self.images = [] - - self.ordered_children = [] - - def buildChildren(self, child_, nodeName_): - supermod.docParaType.buildChildren(self, child_, nodeName_) - - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "ref": - obj_ = supermod.docRefTextType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'parameterlist': - obj_ = supermod.docParamListType.factory() - obj_.build(child_) - self.parameterlist.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'simplesect': - obj_ = supermod.docSimpleSectType.factory() - obj_.build(child_) - self.simplesects.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'programlisting': - obj_ = supermod.listingType.factory() - obj_.build(child_) - # Add programlisting nodes to self.content rather than self.programlisting, - # because programlisting and content nodes can interleave as shown in - # https://www.stack.nl/~dimitri/doxygen/manual/examples/include/html/example.html. 
- self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'image': - obj_ = supermod.docImageType.factory() - obj_.build(child_) - self.images.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and ( - nodeName_ == 'bold' or - nodeName_ == 'emphasis' or - nodeName_ == 'computeroutput' or - nodeName_ == 'subscript' or - nodeName_ == 'superscript' or - nodeName_ == 'center' or - nodeName_ == 'small'): - obj_ = supermod.docMarkupType.factory() - obj_.build(child_) - obj_.type_ = nodeName_ - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'verbatim': - childobj_ = verbatimTypeSub.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, - 'verbatim', childobj_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'formula': - childobj_ = docFormulaTypeSub.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, - 'formula', childobj_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "itemizedlist": - obj_ = supermod.docListType.factory(subtype="itemized") - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "orderedlist": - obj_ = supermod.docListType.factory(subtype="ordered") - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'heading': - obj_ = supermod.docHeadingType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'ulink': - obj_ = supermod.docURLLink.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "xrefsect": - obj_ = supermod.docXRefSectType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "variablelist": - obj_ = supermod.docVariableListType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "anchor": - obj_ = supermod.docAnchorType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "parblock": - obj_ = supermod.docParBlockType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "blockquote": - obj_ = supermod.docBlockQuoteType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "table": - obj_ = supermod.docTableType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "dotfile": - obj_ = supermod.docDotFileType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "dot": - obj_ = supermod.docDotType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and ( - nodeName_ == "ndash" or nodeName_ == "mdash" - ): - # inject a emphasized dash unicode char as a placeholder/flag for rendering - # later. 
See visit_docblockquote() - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeText, "", "—") - self.content.append(obj_) - else: - obj_ = None - - if obj_: - self.ordered_children.append(obj_) - - -supermod.docParaType.subclass = docParaTypeSub -# end class docParaTypeSub - - -class docParBlockTypeSub(supermod.docParBlockType): - - node_type = "docparblock" - - def __init__(self, para=None): - supermod.docParBlockType.__init__(self, para) - - def buildChildren(self, child_, nodeName_): - supermod.docParBlockType.buildChildren(self, child_, nodeName_) - - -supermod.docParBlockType.subclass = docParBlockTypeSub -# end class docParBlockTypeSub - - -class docMarkupTypeSub(supermod.docMarkupType): - - node_type = "docmarkup" - - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - supermod.docMarkupType.__init__(self, valueOf_, mixedclass_, content_) - self.type_ = None - - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', - child_.nodeValue) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'ref': - childobj_ = supermod.docRefTextType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'ref', - childobj_) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' - - -supermod.docMarkupType.subclass = docMarkupTypeSub -# end class docMarkupTypeSub - - -class docTitleTypeSub(supermod.docTitleType): - - node_type = "doctitle" - - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - supermod.docTitleType.__init__(self, valueOf_, mixedclass_, content_) - self.type_ = None - - def buildChildren(self, child_, nodeName_): - supermod.docTitleType.buildChildren(self, child_, nodeName_) - - if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "ref": - obj_ = supermod.docRefTextType.factory() - obj_.build(child_) - self.content_.append(obj_) - self.valueOf_ += obj_.valueOf_ - elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "anchor": - obj_ = supermod.docAnchorType.factory() - obj_.build(child_) - self.content_.append(obj_) - - -supermod.docTitleType.subclass = docTitleTypeSub -# end class docTitleTypeSub - - -class ParseError(Exception): - pass - - -class FileIOError(Exception): - pass - - -def parse(inFilename): - - try: - doc = minidom.parse(inFilename) - except IOError as e: - raise FileIOError(e) - except ExpatError as e: - raise ParseError(e) - - rootNode = doc.documentElement - rootObj = supermod.DoxygenType.factory() - rootObj.build(rootNode) - return rootObj diff --git a/breathe/parser/compoundsuper.py b/breathe/parser/compoundsuper.py deleted file mode 100644 index 79050edd..00000000 --- a/breathe/parser/compoundsuper.py +++ /dev/null @@ -1,6056 +0,0 @@ -# -# Generated Thu Jun 11 18:44:25 2009 by generateDS.py. -# - -import sys -import os -import getopt -from xml.dom import minidom -from xml.dom import Node - -from .. import filetypes -# -# User methods -# -# Calls to the methods in these classes are generated by generateDS.py. -# You can replace these methods by re-implementing the following class -# in a module named generatedssuper.py. 
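Before the body of the deleted compoundsuper.py continues, a note on parse() at the end of compound.py above: it is the module's entry point. minidom loads the file, DoxygenType.factory() picks up whichever subclasses were registered, and build() walks the DOM into Python objects. A sketch of the same flow written against a hypothetical root factory rather than the real DoxygenType (an illustration only, not part of the patch):

from xml.dom import minidom
from xml.parsers.expat import ExpatError


class ParseError(Exception):
    pass


def parse_doxygen_xml(in_filename, root_factory):
    # Same shape as parse() above (which additionally distinguishes
    # FileIOError from ParseError): load the file, build the root object
    # from the document element, and wrap low-level failures.
    try:
        doc = minidom.parse(in_filename)
    except (IOError, ExpatError) as exc:
        raise ParseError(exc)
    root_obj = root_factory()
    root_obj.build(doc.documentElement)
    return root_obj

Hypothetical usage, assuming Doxygen has already written its XML output: root = parse_doxygen_xml("xml/classSample.xml", supermod.DoxygenType.factory), then root.get_compounddef() gives the compound being documented.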
-
-try:
-    from generatedssuper import GeneratedsSuper
-except ImportError as exp:
-
-    class GeneratedsSuper:
-        def format_string(self, input_data, input_name=''):
-            return input_data
-        def format_integer(self, input_data, input_name=''):
-            return '%d' % input_data
-        def format_float(self, input_data, input_name=''):
-            return '%f' % input_data
-        def format_double(self, input_data, input_name=''):
-            return '%e' % input_data
-        def format_boolean(self, input_data, input_name=''):
-            return '%s' % input_data
-
-
-#
-# If you have installed IPython you can uncomment and use the following.
-# IPython is available from http://ipython.scipy.org/.
-#
-
-## from IPython.Shell import IPShellEmbed
-## args = ''
-## ipshell = IPShellEmbed(args,
-##     banner = 'Dropping into IPython',
-##     exit_msg = 'Leaving Interpreter, back to program.')
-
-# Then use the following line where and when you want to drop into the
-# IPython shell:
-#    ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
-
-#
-# Globals
-#
-
-ExternalEncoding = 'ascii'
-
-#
-# Support/utility functions.
-#
-
-def showIndent(outfile, level):
-    for idx in range(level):
-        outfile.write('    ')
-
-def quote_xml(inStr):
-    s1 = (isinstance(inStr, basestring) and inStr or
-          '%s' % inStr)
-    s1 = s1.replace('&', '&amp;')
-    s1 = s1.replace('<', '&lt;')
-    s1 = s1.replace('>', '&gt;')
-    return s1
-
-def quote_attrib(inStr):
-    s1 = (isinstance(inStr, basestring) and inStr or
-          '%s' % inStr)
-    s1 = s1.replace('&', '&amp;')
-    s1 = s1.replace('<', '&lt;')
-    s1 = s1.replace('>', '&gt;')
-    if '"' in s1:
-        if "'" in s1:
-            s1 = '"%s"' % s1.replace('"', "&quot;")
-        else:
-            s1 = "'%s'" % s1
-    else:
-        s1 = '"%s"' % s1
-    return s1
-
-def quote_python(inStr):
-    s1 = inStr
-    if s1.find("'") == -1:
-        if s1.find('\n') == -1:
-            return "'%s'" % s1
-        else:
-            return "'''%s'''" % s1
-    else:
-        if s1.find('"') != -1:
-            s1 = s1.replace('"', '\\"')
-        if s1.find('\n') == -1:
-            return '"%s"' % s1
-        else:
-            return '"""%s"""' % s1
-
-
-class MixedContainer:
-
-    node_type = "mixedcontainer"
-
-    # Constants for category:
-    CategoryNone = 0
-    CategoryText = 1
-    CategorySimple = 2
-    CategoryComplex = 3
-    # Constants for content_type:
-    TypeNone = 0
-    TypeText = 1
-    TypeString = 2
-    TypeInteger = 3
-    TypeFloat = 4
-    TypeDecimal = 5
-    TypeDouble = 6
-    TypeBoolean = 7
-    def __init__(self, category, content_type, name, value):
-        self.category = category
-        self.content_type = content_type
-        self.name = name
-        self.value = value
-    def getCategory(self):
-        return self.category
-    def getContenttype(self, content_type):
-        return self.content_type
-    def getValue(self):
-        return self.value
-    def getName(self):
-        return self.name
-
-
-class _MemberSpec(object):
-    def __init__(self, name='', data_type='', container=0):
-        self.name = name
-        self.data_type = data_type
-        self.container = container
-    def set_name(self, name): self.name = name
-    def get_name(self): return self.name
-    def set_data_type(self, data_type): self.data_type = data_type
-    def get_data_type(self): return self.data_type
-    def set_container(self, container): self.container = container
-    def get_container(self): return self.container
-
-
-#
-# Data representation classes.
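MixedContainer, defined just above, is how the generated classes keep mixed content: each run of character data and each inline child element becomes one (category, content_type, name, value) record appended to a content_ list in document order, which is what the buildChildren() methods throughout this file do. A trimmed-down copy of the class plus a toy content_ list, to show how such a list reads back out (an illustration only; in the real classes the value of a complex entry is the built child object, not a string):

class MixedContainer:
    # Trimmed copy of the class above, keeping only the pieces used here.
    CategoryText = 1
    CategoryComplex = 3
    TypeNone = 0

    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value

    def getValue(self):
        return self.value


# A paragraph whose XML read "see Foo for details", with Foo wrapped in an
# inline element, would be stored as three ordered entries:
content_ = [
    MixedContainer(MixedContainer.CategoryText, MixedContainer.TypeNone, '', 'see '),
    MixedContainer(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'ref', 'Foo'),
    MixedContainer(MixedContainer.CategoryText, MixedContainer.TypeNone, '', ' for details'),
]

assert ''.join(str(item.getValue()) for item in content_) == 'see Foo for details'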
-# - -class DoxygenType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, version=None, compounddef=None): - self.version = version - self.compounddef = compounddef - def factory(*args_, **kwargs_): - if DoxygenType.subclass: - return DoxygenType.subclass(*args_, **kwargs_) - else: - return DoxygenType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_compounddef(self): return self.compounddef - def set_compounddef(self, compounddef): self.compounddef = compounddef - def get_version(self): return self.version - def set_version(self, version): self.version = version - def hasContent_(self): - if ( - self.compounddef is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('version'): - self.version = attrs.get('version').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'compounddef': - obj_ = compounddefType.factory() - obj_.build(child_) - self.set_compounddef(obj_) -# end class DoxygenType - - -class compounddefType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None, language=None): - self.kind = kind - self.prot = prot - self.id = id - self.language = language - self.compoundname = compoundname - self.title = title - if basecompoundref is None: - self.basecompoundref = [] - else: - self.basecompoundref = basecompoundref - if derivedcompoundref is None: - self.derivedcompoundref = [] - else: - self.derivedcompoundref = derivedcompoundref - if includes is None: - self.includes = [] - else: - self.includes = includes - if includedby is None: - self.includedby = [] - else: - self.includedby = includedby - self.incdepgraph = incdepgraph - self.invincdepgraph = invincdepgraph - if innerdir is None: - self.innerdir = [] - else: - self.innerdir = innerdir - if innerfile is None: - self.innerfile = [] - else: - self.innerfile = innerfile - if innerclass is None: - self.innerclass = [] - else: - self.innerclass = innerclass - if innernamespace is None: - self.innernamespace = [] - else: - self.innernamespace = innernamespace - if innerpage is None: - self.innerpage = [] - else: - self.innerpage = innerpage - if innergroup is None: - self.innergroup = [] - else: - self.innergroup = innergroup - self.templateparamlist = templateparamlist - if sectiondef is None: - self.sectiondef = [] - else: - self.sectiondef = sectiondef - self.briefdescription = briefdescription - self.detaileddescription = detaileddescription - self.inheritancegraph = inheritancegraph - self.collaborationgraph = collaborationgraph - self.programlisting = programlisting - self.location = location - self.listofallmembers = listofallmembers - self.namespaces = [] - def factory(*args_, **kwargs_): - if compounddefType.subclass: - return compounddefType.subclass(*args_, **kwargs_) - else: - 
return compounddefType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_compoundname(self): return self.compoundname - def set_compoundname(self, compoundname): self.compoundname = compoundname - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_basecompoundref(self): return self.basecompoundref - def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref - def add_basecompoundref(self, value): self.basecompoundref.append(value) - def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value - def get_derivedcompoundref(self): return self.derivedcompoundref - def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref - def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value) - def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value - def get_includes(self): return self.includes - def set_includes(self, includes): self.includes = includes - def add_includes(self, value): self.includes.append(value) - def insert_includes(self, index, value): self.includes[index] = value - def get_includedby(self): return self.includedby - def set_includedby(self, includedby): self.includedby = includedby - def add_includedby(self, value): self.includedby.append(value) - def insert_includedby(self, index, value): self.includedby[index] = value - def get_incdepgraph(self): return self.incdepgraph - def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph - def get_invincdepgraph(self): return self.invincdepgraph - def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph - def get_innerdir(self): return self.innerdir - def set_innerdir(self, innerdir): self.innerdir = innerdir - def add_innerdir(self, value): self.innerdir.append(value) - def insert_innerdir(self, index, value): self.innerdir[index] = value - def get_innerfile(self): return self.innerfile - def set_innerfile(self, innerfile): self.innerfile = innerfile - def add_innerfile(self, value): self.innerfile.append(value) - def insert_innerfile(self, index, value): self.innerfile[index] = value - def get_innerclass(self): return self.innerclass - def set_innerclass(self, innerclass): self.innerclass = innerclass - def add_innerclass(self, value): self.innerclass.append(value) - def insert_innerclass(self, index, value): self.innerclass[index] = value - def get_innernamespace(self): return self.innernamespace - def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace - def add_innernamespace(self, value): self.innernamespace.append(value) - def insert_innernamespace(self, index, value): self.innernamespace[index] = value - def get_innerpage(self): return self.innerpage - def set_innerpage(self, innerpage): self.innerpage = innerpage - def add_innerpage(self, value): self.innerpage.append(value) - def insert_innerpage(self, index, value): self.innerpage[index] = value - def get_innergroup(self): return self.innergroup - def set_innergroup(self, innergroup): self.innergroup = innergroup - def add_innergroup(self, value): self.innergroup.append(value) - def insert_innergroup(self, index, value): self.innergroup[index] = value - def get_templateparamlist(self): return self.templateparamlist - def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist - def get_sectiondef(self): return self.sectiondef - def set_sectiondef(self, sectiondef): 
self.sectiondef = sectiondef - def add_sectiondef(self, value): self.sectiondef.append(value) - def insert_sectiondef(self, index, value): self.sectiondef[index] = value - def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription - def get_detaileddescription(self): return self.detaileddescription - def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription - def get_inheritancegraph(self): return self.inheritancegraph - def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph - def get_collaborationgraph(self): return self.collaborationgraph - def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph - def get_programlisting(self): return self.programlisting - def set_programlisting(self, programlisting): self.programlisting = programlisting - def get_location(self): return self.location - def set_location(self, location): self.location = location - def get_listofallmembers(self): return self.listofallmembers - def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_id(self): return self.id - def set_id(self, id): self.id = id - def hasContent_(self): - if ( - self.compoundname is not None or - self.title is not None or - self.basecompoundref is not None or - self.derivedcompoundref is not None or - self.includes is not None or - self.includedby is not None or - self.incdepgraph is not None or - self.invincdepgraph is not None or - self.innerdir is not None or - self.innerfile is not None or - self.innerclass is not None or - self.innernamespace is not None or - self.innerpage is not None or - self.innergroup is not None or - self.templateparamlist is not None or - self.sectiondef is not None or - self.briefdescription is not None or - self.detaileddescription is not None or - self.inheritancegraph is not None or - self.collaborationgraph is not None or - self.programlisting is not None or - self.location is not None or - self.listofallmembers is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = attrs.get('kind').value - if attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('id'): - self.id = attrs.get('id').value - if attrs.get('language'): - self.language = attrs.get('language').value.lower() - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'compoundname': - compoundname_ = '' - for text__content_ in child_.childNodes: - compoundname_ += text__content_.nodeValue - self.compoundname = compoundname_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - obj_ = docTitleType.factory() - obj_.build(child_) - self.set_title(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'basecompoundref': - obj_ = compoundRefType.factory() - obj_.build(child_) - self.basecompoundref.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'derivedcompoundref': - obj_ = 
compoundRefType.factory() - obj_.build(child_) - self.derivedcompoundref.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'includes': - obj_ = incType.factory() - obj_.build(child_) - self.includes.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'includedby': - obj_ = incType.factory() - obj_.build(child_) - self.includedby.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'incdepgraph': - obj_ = graphType.factory( - caption=f"Include dependency graph for {self.get_compoundname()}:" - ) - obj_.build(child_) - self.set_incdepgraph(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'invincdepgraph': - obj_ = graphType.factory( - direction="back", - caption=f"This graph shows which files directly " - f"or indirectly include {self.get_compoundname()}:" - ) - obj_.build(child_) - self.set_invincdepgraph(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerdir': - obj_ = refType.factory(nodeName_) - obj_.build(child_) - self.innerdir.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerfile': - obj_ = refType.factory(nodeName_) - obj_.build(child_) - self.innerfile.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerclass': - obj_ = refType.factory(nodeName_) - obj_.build(child_) - self.innerclass.append(obj_) - self.namespaces.append(obj_.content_[0].getValue()) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innernamespace': - obj_ = refType.factory(nodeName_) - obj_.build(child_) - self.innernamespace.append(obj_) - self.namespaces.append(obj_.content_[0].getValue()) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerpage': - obj_ = refType.factory(nodeName_) - obj_.build(child_) - self.innerpage.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innergroup': - obj_ = refType.factory(nodeName_) - obj_.build(child_) - self.innergroup.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'templateparamlist': - obj_ = templateparamlistType.factory() - obj_.build(child_) - self.set_templateparamlist(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sectiondef': - obj_ = sectiondefType.factory() - obj_.build(child_) - self.sectiondef.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_briefdescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'detaileddescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_detaileddescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'inheritancegraph': - obj_ = graphType.factory( - caption=f"Inheritance diagram for {self.get_compoundname()}:" - ) - obj_.build(child_) - self.set_inheritancegraph(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'collaborationgraph': - obj_ = graphType.factory( - caption=f"Collaboration diagram for {self.get_compoundname()}:" - ) - obj_.build(child_) - self.set_collaborationgraph(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'programlisting': - obj_ = listingType.factory(domain=self.language) - obj_.build(child_) - self.set_programlisting(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'location': - obj_ = locationType.factory() - obj_.build(child_) - 
self.set_location(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'listofallmembers': - obj_ = listofallmembersType.factory() - obj_.build(child_) - self.set_listofallmembers(obj_) -# end class compounddefType - - -class listofallmembersType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, member=None): - if member is None: - self.member = [] - else: - self.member = member - def factory(*args_, **kwargs_): - if listofallmembersType.subclass: - return listofallmembersType.subclass(*args_, **kwargs_) - else: - return listofallmembersType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_member(self): return self.member - def set_member(self, member): self.member = member - def add_member(self, value): self.member.append(value) - def insert_member(self, index, value): self.member[index] = value - def hasContent_(self): - if ( - self.member is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'member': - obj_ = memberRefType.factory() - obj_.build(child_) - self.member.append(obj_) -# end class listofallmembersType - - -class memberRefType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None): - self.virt = virt - self.prot = prot - self.refid = refid - self.ambiguityscope = ambiguityscope - self.scope = scope - self.name = name - def factory(*args_, **kwargs_): - if memberRefType.subclass: - return memberRefType.subclass(*args_, **kwargs_) - else: - return memberRefType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_scope(self): return self.scope - def set_scope(self, scope): self.scope = scope - def get_name(self): return self.name - def set_name(self, name): self.name = name - def get_virt(self): return self.virt - def set_virt(self, virt): self.virt = virt - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def get_ambiguityscope(self): return self.ambiguityscope - def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope - def hasContent_(self): - if ( - self.scope is not None or - self.name is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('virt'): - self.virt = attrs.get('virt').value - if attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - if attrs.get('ambiguityscope'): - self.ambiguityscope = attrs.get('ambiguityscope').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'scope': - scope_ = '' - for text__content_ in child_.childNodes: - scope_ += text__content_.nodeValue - self.scope = scope_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': - name_ = '' - for text__content_ in child_.childNodes: - name_ 
+= text__content_.nodeValue - self.name = name_ -# end class memberRefType - - -class scope(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if scope.subclass: - return scope.subclass(*args_, **kwargs_) - else: - return scope(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class scope - - -class name(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if name.subclass: - return name.subclass(*args_, **kwargs_) - else: - return name(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class name - - -class compoundRefType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): - self.virt = virt - self.prot = prot - self.refid = refid - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if compoundRefType.subclass: - return compoundRefType.subclass(*args_, **kwargs_) - else: - return compoundRefType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_virt(self): return self.virt - def set_virt(self, virt): self.virt = virt - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('virt'): - self.virt = attrs.get('virt').value - if 
attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class compoundRefType - - -class reimplementType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): - self.refid = refid - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if reimplementType.subclass: - return reimplementType.subclass(*args_, **kwargs_) - else: - return reimplementType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class reimplementType - - -class incType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): - self.local = local - self.refid = refid - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if incType.subclass: - return incType.subclass(*args_, **kwargs_) - else: - return incType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_local(self): return self.local - def set_local(self, local): self.local = local - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('local'): - self.local = attrs.get('local').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, 
nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class incType - - -class refType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): - self.prot = prot - self.refid = refid - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if refType.subclass: - return refType.subclass(*args_, **kwargs_) - else: - return refType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class refType - - -class refTextType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): - self.refid = refid - self.kindref = kindref - self.external = external - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if refTextType.subclass: - return refTextType.subclass(*args_, **kwargs_) - else: - return refTextType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def get_kindref(self): return self.kindref - def set_kindref(self, kindref): self.kindref = kindref - def get_external(self): return self.external - def set_external(self, external): self.external = external - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if 
attrs.get('refid'): - self.refid = attrs.get('refid').value - if attrs.get('kindref'): - self.kindref = attrs.get('kindref').value - if attrs.get('external'): - self.external = attrs.get('external').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class refTextType - - -class sectiondefType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, header=None, description=None, memberdef=None): - self.kind = kind - self.header = header - self.description = description - if memberdef is None: - self.memberdef = [] - else: - self.memberdef = memberdef - def factory(*args_, **kwargs_): - if sectiondefType.subclass: - return sectiondefType.subclass(*args_, **kwargs_) - else: - return sectiondefType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_header(self): return self.header - def set_header(self, header): self.header = header - def get_description(self): return self.description - def set_description(self, description): self.description = description - def get_memberdef(self): return self.memberdef - def set_memberdef(self, memberdef): self.memberdef = memberdef - def add_memberdef(self, value): self.memberdef.append(value) - def insert_memberdef(self, index, value): self.memberdef[index] = value - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def hasContent_(self): - if ( - self.header is not None or - self.description is not None or - self.memberdef is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = attrs.get('kind').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'header': - header_ = '' - for text__content_ in child_.childNodes: - header_ += text__content_.nodeValue - self.header = header_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'description': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_description(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'memberdef': - obj_ = memberdefType.factory() - obj_.build(child_) - self.memberdef.append(obj_) -# end class sectiondefType - - -class memberdefType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, strong=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None, refqual=None): - self.initonly = initonly - self.kind = 
kind - self.volatile = volatile - self.const = const - self.raisexx = raisexx - self.virt = virt - self.readable = readable - self.prot = prot - self.explicit = explicit - self.new = new - self.final = final - self.writable = writable - self.add = add - self.static = static - self.strong = strong - self.remove = remove - self.sealed = sealed - self.mutable = mutable - self.gettable = gettable - self.inline = inline - self.settable = settable - self.id = id - self.templateparamlist = templateparamlist - self.type_ = type_ - self.definition = definition - self.argsstring = argsstring - self.name = name - self.read = read - self.write = write - self.bitfield = bitfield - if reimplements is None: - self.reimplements = [] - else: - self.reimplements = reimplements - if reimplementedby is None: - self.reimplementedby = [] - else: - self.reimplementedby = reimplementedby - if param is None: - self.param = [] - else: - self.param = param - if enumvalue is None: - self.enumvalue = [] - else: - self.enumvalue = enumvalue - self.initializer = initializer - self.exceptions = exceptions - self.briefdescription = briefdescription - self.detaileddescription = detaileddescription - self.inbodydescription = inbodydescription - self.location = location - if references is None: - self.references = [] - else: - self.references = references - if referencedby is None: - self.referencedby = [] - else: - self.referencedby = referencedby - self.refqual = refqual - def factory(*args_, **kwargs_): - if memberdefType.subclass: - return memberdefType.subclass(*args_, **kwargs_) - else: - return memberdefType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_templateparamlist(self): return self.templateparamlist - def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist - def get_type(self): return self.type_ - def set_type(self, type_): self.type_ = type_ - def get_definition(self): return self.definition - def set_definition(self, definition): self.definition = definition - def get_argsstring(self): return self.argsstring - def set_argsstring(self, argsstring): self.argsstring = argsstring - def get_name(self): return self.name - def set_name(self, name): self.name = name - def get_read(self): return self.read - def set_read(self, read): self.read = read - def get_write(self): return self.write - def set_write(self, write): self.write = write - def get_bitfield(self): return self.bitfield - def set_bitfield(self, bitfield): self.bitfield = bitfield - def get_reimplements(self): return self.reimplements - def set_reimplements(self, reimplements): self.reimplements = reimplements - def add_reimplements(self, value): self.reimplements.append(value) - def insert_reimplements(self, index, value): self.reimplements[index] = value - def get_reimplementedby(self): return self.reimplementedby - def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby - def add_reimplementedby(self, value): self.reimplementedby.append(value) - def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value - def get_param(self): return self.param - def set_param(self, param): self.param = param - def add_param(self, value): self.param.append(value) - def insert_param(self, index, value): self.param[index] = value - def get_enumvalue(self): return self.enumvalue - def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue - def add_enumvalue(self, value): self.enumvalue.append(value) - def insert_enumvalue(self, index, value): 
self.enumvalue[index] = value - def get_initializer(self): return self.initializer - def set_initializer(self, initializer): self.initializer = initializer - def get_exceptions(self): return self.exceptions - def set_exceptions(self, exceptions): self.exceptions = exceptions - def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription - def get_detaileddescription(self): return self.detaileddescription - def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription - def get_inbodydescription(self): return self.inbodydescription - def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription - def get_location(self): return self.location - def set_location(self, location): self.location = location - def get_references(self): return self.references - def set_references(self, references): self.references = references - def add_references(self, value): self.references.append(value) - def insert_references(self, index, value): self.references[index] = value - def get_referencedby(self): return self.referencedby - def set_referencedby(self, referencedby): self.referencedby = referencedby - def add_referencedby(self, value): self.referencedby.append(value) - def insert_referencedby(self, index, value): self.referencedby[index] = value - def get_initonly(self): return self.initonly - def set_initonly(self, initonly): self.initonly = initonly - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def get_volatile(self): return self.volatile - def set_volatile(self, volatile): self.volatile = volatile - def get_const(self): return self.const - def set_const(self, const): self.const = const - def get_raise(self): return self.raisexx - def set_raise(self, raisexx): self.raisexx = raisexx - def get_virt(self): return self.virt - def set_virt(self, virt): self.virt = virt - def get_readable(self): return self.readable - def set_readable(self, readable): self.readable = readable - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_explicit(self): return self.explicit - def set_explicit(self, explicit): self.explicit = explicit - def get_new(self): return self.new - def set_new(self, new): self.new = new - def get_final(self): return self.final - def set_final(self, final): self.final = final - def get_writable(self): return self.writable - def set_writable(self, writable): self.writable = writable - def get_add(self): return self.add - def set_add(self, add): self.add = add - def get_static(self): return self.static - def set_static(self, static): self.static = static - def get_strong(self): return self.strong - def set_strong(self, strong): self.strong = strong - def get_remove(self): return self.remove - def set_remove(self, remove): self.remove = remove - def get_sealed(self): return self.sealed - def set_sealed(self, sealed): self.sealed = sealed - def get_mutable(self): return self.mutable - def set_mutable(self, mutable): self.mutable = mutable - def get_gettable(self): return self.gettable - def set_gettable(self, gettable): self.gettable = gettable - def get_inline(self): return self.inline - def set_inline(self, inline): self.inline = inline - def get_settable(self): return self.settable - def set_settable(self, settable): self.settable = settable - def get_id(self): return self.id - def set_id(self, id): self.id = id - def get_refqual(self): return 
self.refqual - def set_refqual(self, refqual): self.refqual = refqual - def hasContent_(self): - if ( - self.templateparamlist is not None or - self.type_ is not None or - self.definition is not None or - self.argsstring is not None or - self.name is not None or - self.read is not None or - self.write is not None or - self.bitfield is not None or - self.reimplements is not None or - self.reimplementedby is not None or - self.param is not None or - self.enumvalue is not None or - self.initializer is not None or - self.exceptions is not None or - self.briefdescription is not None or - self.detaileddescription is not None or - self.inbodydescription is not None or - self.location is not None or - self.references is not None or - self.referencedby is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('initonly'): - self.initonly = attrs.get('initonly').value - if attrs.get('kind'): - self.kind = attrs.get('kind').value - if attrs.get('volatile'): - self.volatile = attrs.get('volatile').value - if attrs.get('const'): - self.const = attrs.get('const').value - if attrs.get('raise'): - self.raisexx = attrs.get('raise').value - if attrs.get('virt'): - self.virt = attrs.get('virt').value - if attrs.get('readable'): - self.readable = attrs.get('readable').value - if attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('explicit'): - self.explicit = attrs.get('explicit').value - if attrs.get('new'): - self.new = attrs.get('new').value - if attrs.get('final'): - self.final = attrs.get('final').value - if attrs.get('writable'): - self.writable = attrs.get('writable').value - if attrs.get('add'): - self.add = attrs.get('add').value - if attrs.get('static'): - self.static = attrs.get('static').value - if attrs.get('strong'): - self.strong = attrs.get('strong').value - if attrs.get('remove'): - self.remove = attrs.get('remove').value - if attrs.get('sealed'): - self.sealed = attrs.get('sealed').value - if attrs.get('mutable'): - self.mutable = attrs.get('mutable').value - if attrs.get('gettable'): - self.gettable = attrs.get('gettable').value - if attrs.get('inline'): - self.inline = attrs.get('inline').value - if attrs.get('settable'): - self.settable = attrs.get('settable').value - if attrs.get('id'): - self.id = attrs.get('id').value - if attrs.get('refqual'): - self.refqual = attrs.get('refqual').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'templateparamlist': - obj_ = templateparamlistType.factory() - obj_.build(child_) - self.set_templateparamlist(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'type': - obj_ = linkedTextType.factory() - obj_.build(child_) - self.set_type(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'definition': - definition_ = '' - for text__content_ in child_.childNodes: - definition_ += text__content_.nodeValue - self.definition = definition_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'argsstring': - argsstring_ = '' - for text__content_ in child_.childNodes: - argsstring_ += text__content_.nodeValue - self.argsstring = argsstring_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': - name_ = '' - for text__content_ in child_.childNodes: - name_ += 
text__content_.nodeValue - self.name = name_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'read': - read_ = '' - for text__content_ in child_.childNodes: - read_ += text__content_.nodeValue - self.read = read_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'write': - write_ = '' - for text__content_ in child_.childNodes: - write_ += text__content_.nodeValue - self.write = write_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'bitfield': - bitfield_ = '' - for text__content_ in child_.childNodes: - bitfield_ += text__content_.nodeValue - self.bitfield = bitfield_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'reimplements': - obj_ = reimplementType.factory() - obj_.build(child_) - self.reimplements.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'reimplementedby': - obj_ = reimplementType.factory() - obj_.build(child_) - self.reimplementedby.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'param': - obj_ = paramType.factory() - obj_.build(child_) - self.param.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'enumvalue': - obj_ = enumvalueType.factory() - obj_.build(child_) - self.enumvalue.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'initializer': - obj_ = linkedTextType.factory() - obj_.build(child_) - self.set_initializer(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'exceptions': - obj_ = linkedTextType.factory() - obj_.build(child_) - self.set_exceptions(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_briefdescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'detaileddescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_detaileddescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'inbodydescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_inbodydescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'location': - obj_ = locationType.factory() - obj_.build(child_) - self.set_location(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'references': - obj_ = referenceType.factory() - obj_.build(child_) - self.references.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'referencedby': - obj_ = referenceType.factory() - obj_.build(child_) - self.referencedby.append(obj_) -# end class memberdefType - - -class definition(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if definition.subclass: - return definition.subclass(*args_, **kwargs_) - else: - return definition(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - 
            self.valueOf_ += child_.nodeValue
-        elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class definition
-
-
-class argsstring(GeneratedsSuper):
-    subclass = None
-    superclass = None
-    def __init__(self, valueOf_=''):
-        self.valueOf_ = valueOf_
-    def factory(*args_, **kwargs_):
-        if argsstring.subclass:
-            return argsstring.subclass(*args_, **kwargs_)
-        else:
-            return argsstring(*args_, **kwargs_)
-    factory = staticmethod(factory)
-    def getValueOf_(self): return self.valueOf_
-    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
-    def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''):
-        showIndent(outfile, level)
-        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
-        self.exportAttributes(outfile, level, namespace_, name_='argsstring')
-        if self.hasContent_():
-            outfile.write('>\n')
-            self.exportChildren(outfile, level + 1, namespace_, name_)
-            showIndent(outfile, level)
-            outfile.write('</%s%s>\n' % (namespace_, name_))
-        else:
-            outfile.write(' />\n')
-    def hasContent_(self):
-        if (
-            self.valueOf_ is not None
-            ):
-            return True
-        else:
-            return False
-    def build(self, node_):
-        attrs = node_.attributes
-        self.buildAttributes(attrs)
-        self.valueOf_ = ''
-        for child_ in node_.childNodes:
-            nodeName_ = child_.nodeName.split(':')[-1]
-            self.buildChildren(child_, nodeName_)
-    def buildAttributes(self, attrs):
-        pass
-    def buildChildren(self, child_, nodeName_):
-        if child_.nodeType == Node.TEXT_NODE:
-            self.valueOf_ += child_.nodeValue
-        elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class argsstring
-
-
-class read(GeneratedsSuper):
-    subclass = None
-    superclass = None
-    def __init__(self, valueOf_=''):
-        self.valueOf_ = valueOf_
-    def factory(*args_, **kwargs_):
-        if read.subclass:
-            return read.subclass(*args_, **kwargs_)
-        else:
-            return read(*args_, **kwargs_)
-    factory = staticmethod(factory)
-    def getValueOf_(self): return self.valueOf_
-    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
-    def hasContent_(self):
-        if (
-            self.valueOf_ is not None
-            ):
-            return True
-        else:
-            return False
-    def build(self, node_):
-        attrs = node_.attributes
-        self.buildAttributes(attrs)
-        self.valueOf_ = ''
-        for child_ in node_.childNodes:
-            nodeName_ = child_.nodeName.split(':')[-1]
-            self.buildChildren(child_, nodeName_)
-    def buildAttributes(self, attrs):
-        pass
-    def buildChildren(self, child_, nodeName_):
-        if child_.nodeType == Node.TEXT_NODE:
-            self.valueOf_ += child_.nodeValue
-        elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class read
-
-
-class write(GeneratedsSuper):
-    subclass = None
-    superclass = None
-    def __init__(self, valueOf_=''):
-        self.valueOf_ = valueOf_
-    def factory(*args_, **kwargs_):
-        if write.subclass:
-            return write.subclass(*args_, **kwargs_)
-        else:
-            return write(*args_, **kwargs_)
-    factory = staticmethod(factory)
-    def getValueOf_(self): return self.valueOf_
-    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
-    def hasContent_(self):
-        if (
-            self.valueOf_ is not None
-            ):
-            return True
-        else:
-            return False
-    def build(self, node_):
-        attrs = node_.attributes
-        self.buildAttributes(attrs)
-        self.valueOf_ = ''
-        for child_ in node_.childNodes:
-            nodeName_ = child_.nodeName.split(':')[-1]
-            self.buildChildren(child_, nodeName_)
-    def buildAttributes(self, attrs):
-        pass
-    def buildChildren(self, child_, nodeName_):
-        if child_.nodeType == Node.TEXT_NODE:
-            self.valueOf_ += child_.nodeValue
-        elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class write
-
-
-class bitfield(GeneratedsSuper):
-    subclass = None
-    superclass = None
-    def __init__(self, valueOf_=''):
-        self.valueOf_ = valueOf_
-    def factory(*args_, **kwargs_):
-        if bitfield.subclass:
-            return bitfield.subclass(*args_, **kwargs_)
-        else:
-            return bitfield(*args_, **kwargs_)
-    factory = staticmethod(factory)
-    def getValueOf_(self): return self.valueOf_
-    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
-    def hasContent_(self):
-        if (
-            self.valueOf_ is not None
-            ):
-            return True
-        else:
-            return False
-    def build(self, node_):
-        attrs = node_.attributes
-        self.buildAttributes(attrs)
-        self.valueOf_ = ''
-        for child_ in node_.childNodes:
-            nodeName_ = child_.nodeName.split(':')[-1]
-            self.buildChildren(child_, nodeName_)
-    def buildAttributes(self, attrs):
-        pass
-    def buildChildren(self, child_, nodeName_):
-        if child_.nodeType == Node.TEXT_NODE:
-            self.valueOf_ += child_.nodeValue
-        elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class bitfield
-
-
-class descriptionType(GeneratedsSuper):
-    subclass = None
-    superclass = None
-    def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
-        if mixedclass_ is None:
-            self.mixedclass_ = MixedContainer
-        else:
-            self.mixedclass_ = mixedclass_
-        if content_ is None:
-            self.content_ = []
-        else:
-            self.content_ = content_
-    def factory(*args_, **kwargs_):
-        if descriptionType.subclass:
-            return descriptionType.subclass(*args_, **kwargs_)
-        else:
-            return descriptionType(*args_, **kwargs_)
-    factory = staticmethod(factory)
-    def get_title(self): return self.title
-    def set_title(self, title): self.title = title
-    def get_para(self): return self.para
-    def set_para(self, para): self.para = para
-    def add_para(self, value): self.para.append(value)
-    def insert_para(self, index, value): self.para[index] = value
-    def get_sect1(self): return self.sect1
-    def set_sect1(self, sect1): self.sect1 = sect1
-    def add_sect1(self, value): self.sect1.append(value)
-    def insert_sect1(self, index, value): self.sect1[index] = value
-    def get_internal(self): return self.internal
-    def set_internal(self, internal): self.internal = internal
-    def hasContent_(self):
-        if (
-            self.title is not None or
-            self.para is not None or
-            self.sect1 is not None or
-            self.internal is not None
-            ):
-            return True
-        else:
-            return False
-    def build(self, node_):
-        attrs = node_.attributes
-        self.buildAttributes(attrs)
-        for child_ in node_.childNodes:
-            nodeName_ = child_.nodeName.split(':')[-1]
-            self.buildChildren(child_, nodeName_)
-    def buildAttributes(self, attrs):
-        pass
-    def buildChildren(self, child_, nodeName_):
-        if child_.nodeType == Node.ELEMENT_NODE and \
-            nodeName_ == 'title':
-            childobj_ = docTitleType.factory()
-            childobj_.build(child_)
-            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
-                MixedContainer.TypeNone, 'title', childobj_)
-            self.content_.append(obj_)
-        elif child_.nodeType == Node.ELEMENT_NODE and \
-            nodeName_ == 'para':
-            childobj_ = docParaType.factory()
-            childobj_.build(child_)
-            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
-                MixedContainer.TypeNone, 'para', childobj_)
-            self.content_.append(obj_)
-        elif child_.nodeType == Node.ELEMENT_NODE and \
-            nodeName_ == 'sect1':
-            childobj_ = docSect1Type.factory()
-
childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect1', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - childobj_ = docInternalType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) - self.content_.append(obj_) -# end class descriptionType - - -class enumvalueType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): - self.prot = prot - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if enumvalueType.subclass: - return enumvalueType.subclass(*args_, **kwargs_) - else: - return enumvalueType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_name(self): return self.name - def set_name(self, name): self.name = name - def get_initializer(self): return self.initializer - def set_initializer(self, initializer): self.initializer = initializer - def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription - def get_detaileddescription(self): return self.detaileddescription - def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_id(self): return self.id - def set_id(self, id): self.id = id - def hasContent_(self): - if ( - self.name is not None or - self.initializer is not None or - self.briefdescription is not None or - self.detaileddescription is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': - value_ = [] - for text_ in child_.childNodes: - value_.append(text_.nodeValue) - valuestr_ = ''.join(value_) - obj_ = self.mixedclass_(MixedContainer.CategorySimple, - MixedContainer.TypeString, 'name', valuestr_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'initializer': - childobj_ = linkedTextType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'initializer', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': - childobj_ = descriptionType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'briefdescription', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'detaileddescription': - childobj_ = descriptionType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - 
MixedContainer.TypeNone, 'detaileddescription', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class enumvalueType - - -class templateparamlistType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, param=None): - if param is None: - self.param = [] - else: - self.param = param - def factory(*args_, **kwargs_): - if templateparamlistType.subclass: - return templateparamlistType.subclass(*args_, **kwargs_) - else: - return templateparamlistType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_param(self): return self.param - def set_param(self, param): self.param = param - def add_param(self, value): self.param.append(value) - def insert_param(self, index, value): self.param[index] = value - def hasContent_(self): - if ( - self.param is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'param': - obj_ = paramType.factory() - obj_.build(child_) - self.param.append(obj_) -# end class templateparamlistType - - -class paramType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None): - self.type_ = type_ - self.declname = declname - self.defname = defname - self.array = array - self.defval = defval - self.briefdescription = briefdescription - def factory(*args_, **kwargs_): - if paramType.subclass: - return paramType.subclass(*args_, **kwargs_) - else: - return paramType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_type(self): return self.type_ - def set_type(self, type_): self.type_ = type_ - def get_declname(self): return self.declname - def set_declname(self, declname): self.declname = declname - def get_defname(self): return self.defname - def set_defname(self, defname): self.defname = defname - def get_array(self): return self.array - def set_array(self, array): self.array = array - def get_defval(self): return self.defval - def set_defval(self, defval): self.defval = defval - def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription - def hasContent_(self): - if ( - self.type_ is not None or - self.declname is not None or - self.defname is not None or - self.array is not None or - self.defval is not None or - self.briefdescription is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'type': - obj_ = linkedTextType.factory() - obj_.build(child_) - self.set_type(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'declname': - declname_ = '' - for text__content_ in child_.childNodes: - declname_ += text__content_.nodeValue - self.declname = 
declname_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'defname': - defname_ = '' - for text__content_ in child_.childNodes: - defname_ += text__content_.nodeValue - self.defname = defname_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'array': - array_ = '' - for text__content_ in child_.childNodes: - array_ += text__content_.nodeValue - self.array = array_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'defval': - obj_ = linkedTextType.factory() - obj_.build(child_) - self.set_defval(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_briefdescription(obj_) -# end class paramType - - -class declname(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if declname.subclass: - return declname.subclass(*args_, **kwargs_) - else: - return declname(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class declname - - -class defname(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if defname.subclass: - return defname.subclass(*args_, **kwargs_) - else: - return defname(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class defname - - -class array(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if array.subclass: - return array.subclass(*args_, **kwargs_) - else: - return array(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - 
self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class array - - -class linkedTextType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, ref=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if linkedTextType.subclass: - return linkedTextType.subclass(*args_, **kwargs_) - else: - return linkedTextType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_ref(self): return self.ref - def set_ref(self, ref): self.ref = ref - def add_ref(self, value): self.ref.append(value) - def insert_ref(self, index, value): self.ref[index] = value - def hasContent_(self): - if ( - self.ref is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'ref': - childobj_ = docRefTextType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'ref', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class linkedTextType - - -class graphType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, node=None, direction: str = "forward", caption:str = ""): - if node is None: - self.node = [] - else: - self.node = node - self.direction = direction - self.caption = caption - def factory(*args_, **kwargs_): - if graphType.subclass: - return graphType.subclass(*args_, **kwargs_) - else: - return graphType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_direction(self): return self.direction - def set_direction(self, direction): self.direction = direction - def get_caption(self): return self.caption - def set_caption(self, caption): self.caption = caption - def get_node(self): return self.node - def set_node(self, node): self.node = node - def add_node(self, value): self.node.append(value) - def insert_node(self, index, value): self.node[index] = value - def hasContent_(self): - if ( - self.node is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'node': - obj_ = nodeType.factory() - obj_.build(child_) - self.node.append(obj_) -# end class graphType - - -class nodeType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, label=None, link=None, childnode=None): - self.id = id - self.label = label - self.link = link - if childnode 
is None: - self.childnode = [] - else: - self.childnode = childnode - def factory(*args_, **kwargs_): - if nodeType.subclass: - return nodeType.subclass(*args_, **kwargs_) - else: - return nodeType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_label(self): return self.label - def set_label(self, label): self.label = label - def get_link(self): return self.link - def set_link(self, link): self.link = link - def get_childnode(self): return self.childnode - def set_childnode(self, childnode): self.childnode = childnode - def add_childnode(self, value): self.childnode.append(value) - def insert_childnode(self, index, value): self.childnode[index] = value - def get_id(self): return self.id - def set_id(self, id): self.id = id - def hasContent_(self): - if ( - self.label is not None or - self.link is not None or - self.childnode is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'label': - label_ = '' - for text__content_ in child_.childNodes: - label_ += text__content_.nodeValue - self.label = label_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'link': - obj_ = linkType.factory() - obj_.build(child_) - self.set_link(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'childnode': - obj_ = childnodeType.factory() - obj_.build(child_) - self.childnode.append(obj_) -# end class nodeType - - -class label(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if label.subclass: - return label.subclass(*args_, **kwargs_) - else: - return label(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class label - - -class childnodeType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, relation=None, refid=None, edgelabel=None): - self.relation = relation - self.refid = refid - if edgelabel is None: - self.edgelabel = [] - else: - self.edgelabel = edgelabel - def factory(*args_, **kwargs_): - if childnodeType.subclass: - return childnodeType.subclass(*args_, **kwargs_) - else: - return childnodeType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_edgelabel(self): return self.edgelabel - def set_edgelabel(self, edgelabel): self.edgelabel = edgelabel - def add_edgelabel(self, value): self.edgelabel.append(value) - def insert_edgelabel(self, index, value): self.edgelabel[index] = value - def get_relation(self): return 
self.relation - def set_relation(self, relation): self.relation = relation - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='childnodeType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'): - if self.relation is not None: - outfile.write(' relation=%s' % (quote_attrib(self.relation), )) - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'): - for edgelabel_ in self.edgelabel: - showIndent(outfile, level) - outfile.write('<%sedgelabel>%s\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_)) - def hasContent_(self): - if ( - self.edgelabel is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('relation'): - self.relation = attrs.get('relation').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'edgelabel': - edgelabel_ = '' - for text__content_ in child_.childNodes: - edgelabel_ += text__content_.nodeValue - self.edgelabel.append(edgelabel_) -# end class childnodeType - - -class edgelabel(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if edgelabel.subclass: - return edgelabel.subclass(*args_, **kwargs_) - else: - return edgelabel(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='edgelabel') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - 
nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class edgelabel - - -class linkType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, refid=None, external=None, valueOf_=''): - self.refid = refid - self.external = external - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if linkType.subclass: - return linkType.subclass(*args_, **kwargs_) - else: - return linkType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def get_external(self): return self.external - def set_external(self, external): self.external = external - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='linkType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='linkType'): - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - if self.external is not None: - outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) - def exportChildren(self, outfile, level, namespace_='', name_='linkType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('refid'): - self.refid = attrs.get('refid').value - if attrs.get('external'): - self.external = attrs.get('external').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class linkType - - -class listingType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, codeline=None, domain: str=None): - self.domain = domain - if codeline is None: - self.codeline = [] - else: - self.codeline = codeline - def factory(*args_, **kwargs_): - if listingType.subclass: - return listingType.subclass(*args_, **kwargs_) - else: - return listingType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_codeline(self): return self.codeline - def set_codeline(self, codeline): self.codeline = codeline - def add_codeline(self, value): 
self.codeline.append(value) - def insert_codeline(self, index, value): self.codeline[index] = value - def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='listingType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='listingType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='listingType'): - for codeline_ in self.codeline: - codeline_.export(outfile, level, namespace_, name_='codeline') - def hasContent_(self): - if ( - self.codeline is not None - ): - return True - else: - return False - def build(self, node_: minidom.Element): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs: minidom.NamedNodeMap): - if "filename" in attrs: - # extract the domain for this programlisting tag. - filename = attrs["filename"].value - self.domain = filetypes.get_pygments_alias(filename) or filetypes.get_extension(filename) - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'codeline': - obj_ = codelineType.factory() - obj_.build(child_) - self.codeline.append(obj_) -# end class listingType - - -class codelineType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): - self.external = external - self.lineno = lineno - self.refkind = refkind - self.refid = refid - if highlight is None: - self.highlight = [] - else: - self.highlight = highlight - def factory(*args_, **kwargs_): - if codelineType.subclass: - return codelineType.subclass(*args_, **kwargs_) - else: - return codelineType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_highlight(self): return self.highlight - def set_highlight(self, highlight): self.highlight = highlight - def add_highlight(self, value): self.highlight.append(value) - def insert_highlight(self, index, value): self.highlight[index] = value - def get_external(self): return self.external - def set_external(self, external): self.external = external - def get_lineno(self): return self.lineno - def set_lineno(self, lineno): self.lineno = lineno - def get_refkind(self): return self.refkind - def set_refkind(self, refkind): self.refkind = refkind - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='codelineType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'): - if self.external is not None: - outfile.write(' external=%s' % (quote_attrib(self.external), )) - if self.lineno is not None: - outfile.write(' 
lineno="%s"' % self.format_integer(self.lineno, input_name='lineno')) - if self.refkind is not None: - outfile.write(' refkind=%s' % (quote_attrib(self.refkind), )) - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='codelineType'): - for highlight_ in self.highlight: - highlight_.export(outfile, level, namespace_, name_='highlight') - def hasContent_(self): - if ( - self.highlight is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('external'): - self.external = attrs.get('external').value - if attrs.get('lineno'): - try: - self.lineno = int(attrs.get('lineno').value) - except ValueError as exp: - raise ValueError('Bad integer attribute (lineno): %s' % exp) - if attrs.get('refkind'): - self.refkind = attrs.get('refkind').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'highlight': - obj_ = highlightType.factory() - obj_.build(child_) - self.highlight.append(obj_) -# end class codelineType - - -class highlightType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None): - self.classxx = classxx - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if highlightType.subclass: - return highlightType.subclass(*args_, **kwargs_) - else: - return highlightType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_sp(self): return self.sp - def set_sp(self, sp): self.sp = sp - def add_sp(self, value): self.sp.append(value) - def insert_sp(self, index, value): self.sp[index] = value - def get_ref(self): return self.ref - def set_ref(self, ref): self.ref = ref - def add_ref(self, value): self.ref.append(value) - def insert_ref(self, index, value): self.ref[index] = value - def get_class(self): return self.classxx - def set_class(self, classxx): self.classxx = classxx - def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='highlightType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'): - if self.classxx is not None: - outfile.write(' class=%s' % (quote_attrib(self.classxx), )) - def exportChildren(self, outfile, level, namespace_='', name_='highlightType'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.sp is not None or - self.ref is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, 
nodeName_) - def buildAttributes(self, attrs): - if attrs.get('class'): - self.classxx = attrs.get('class').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sp': - value_ = [] - for text_ in child_.childNodes: - value_.append(text_.nodeValue) - # We make this unicode so that our unicode renderer catch-all picks it up - # otherwise it would go through as 'str' and we'd have to pick it up too - valuestr_ = u' ' - obj_ = self.mixedclass_(MixedContainer.CategorySimple, - MixedContainer.TypeString, 'sp', valuestr_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'ref': - childobj_ = docRefTextType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'ref', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class highlightType - - -class sp(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if sp.subclass: - return sp.subclass(*args_, **kwargs_) - else: - return sp(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='sp') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='sp'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='sp'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class sp - - -class referenceType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): - self.endline = endline - self.startline = startline - self.refid = refid - self.compoundref = compoundref - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if referenceType.subclass: - return referenceType.subclass(*args_, **kwargs_) - else: - return referenceType(*args_, 
**kwargs_) - factory = staticmethod(factory) - def get_endline(self): return self.endline - def set_endline(self, endline): self.endline = endline - def get_startline(self): return self.startline - def set_startline(self, startline): self.startline = startline - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def get_compoundref(self): return self.compoundref - def set_compoundref(self, compoundref): self.compoundref = compoundref - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='referenceType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'): - if self.endline is not None: - outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline')) - if self.startline is not None: - outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline')) - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - if self.compoundref is not None: - outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), )) - def exportChildren(self, outfile, level, namespace_='', name_='referenceType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('endline'): - try: - self.endline = int(attrs.get('endline').value) - except ValueError as exp: - raise ValueError('Bad integer attribute (endline): %s' % exp) - if attrs.get('startline'): - try: - self.startline = int(attrs.get('startline').value) - except ValueError as exp: - raise ValueError('Bad integer attribute (startline): %s' % exp) - if attrs.get('refid'): - self.refid = attrs.get('refid').value - if attrs.get('compoundref'): - self.compoundref = attrs.get('compoundref').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class referenceType - - -class locationType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''): - self.bodystart = bodystart - self.line = line - self.bodyend = bodyend - self.bodyfile = bodyfile - self.file = file - self.valueOf_ = valueOf_ - def 
factory(*args_, **kwargs_): - if locationType.subclass: - return locationType.subclass(*args_, **kwargs_) - else: - return locationType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_bodystart(self): return self.bodystart - def set_bodystart(self, bodystart): self.bodystart = bodystart - def get_line(self): return self.line - def set_line(self, line): self.line = line - def get_bodyend(self): return self.bodyend - def set_bodyend(self, bodyend): self.bodyend = bodyend - def get_bodyfile(self): return self.bodyfile - def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile - def get_file(self): return self.file - def set_file(self, file): self.file = file - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='locationType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='locationType'): - if self.bodystart is not None: - outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart')) - if self.line is not None: - outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line')) - if self.bodyend is not None: - outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend')) - if self.bodyfile is not None: - outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), )) - if self.file is not None: - outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), )) - def exportChildren(self, outfile, level, namespace_='', name_='locationType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('bodystart'): - try: - self.bodystart = int(attrs.get('bodystart').value) - except ValueError as exp: - raise ValueError('Bad integer attribute (bodystart): %s' % exp) - if attrs.get('line'): - try: - self.line = int(attrs.get('line').value) - except ValueError as exp: - raise ValueError('Bad integer attribute (line): %s' % exp) - if attrs.get('bodyend'): - try: - self.bodyend = int(attrs.get('bodyend').value) - except ValueError as exp: - raise ValueError('Bad integer attribute (bodyend): %s' % exp) - if attrs.get('bodyfile'): - self.bodyfile = attrs.get('bodyfile').value - if attrs.get('file'): - self.file = attrs.get('file').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end 
class locationType - - -class docSect1Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - if title is None: - self.title = "" - else: - self.title = title - def factory(*args_, **kwargs_): - if docSect1Type.subclass: - return docSect1Type.subclass(*args_, **kwargs_) - else: - return docSect1Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect2(self): return self.sect2 - def set_sect2(self, sect2): self.sect2 = sect2 - def add_sect2(self, value): self.sect2.append(value) - def insert_sect2(self, index, value): self.sect2[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSect1Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.title is not None or - self.para is not None or - self.sect2 is not None or - self.internal is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - self.title = child_.childNodes[0].nodeValue - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect2': - childobj_ = docSect2Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect2', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - childobj_ = docInternalS1Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 
'internal', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docSect1Type - - -class docSect2Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - if title is None: - title = "" - else: - title = title - def factory(*args_, **kwargs_): - if docSect2Type.subclass: - return docSect2Type.subclass(*args_, **kwargs_) - else: - return docSect2Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect3(self): return self.sect3 - def set_sect3(self, sect3): self.sect3 = sect3 - def add_sect3(self, value): self.sect3.append(value) - def insert_sect3(self, index, value): self.sect3[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSect2Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.title is not None or - self.para is not None or - self.sect3 is not None or - self.internal is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - self.title = child_.childNodes[0].nodeValue - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect3': - childobj_ = docSect3Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect3', childobj_) - self.content_.append(obj_) - elif 
child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - childobj_ = docInternalS2Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docSect2Type - - -class docSect3Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - if title is None: - self.title = "" - else: - self.title = title - def factory(*args_, **kwargs_): - if docSect3Type.subclass: - return docSect3Type.subclass(*args_, **kwargs_) - else: - return docSect3Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect4(self): return self.sect4 - def set_sect4(self, sect4): self.sect4 = sect4 - def add_sect4(self, value): self.sect4.append(value) - def insert_sect4(self, index, value): self.sect4[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSect3Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.title is not None or - self.para is not None or - self.sect4 is not None or - self.internal is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - self.title = child_.childNodes[0].nodeValue - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - 
nodeName_ == 'sect4': - childobj_ = docSect4Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect4', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - childobj_ = docInternalS3Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docSect3Type - - -class docSect4Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docSect4Type.subclass: - return docSect4Type.subclass(*args_, **kwargs_) - else: - return docSect4Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSect4Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.title is not None or - self.para is not None or - self.internal is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - childobj_ = docTitleType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - 
self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - childobj_ = docInternalS4Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docSect4Type - - -class docInternalType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docInternalType.subclass: - return docInternalType.subclass(*args_, **kwargs_) - else: - return docInternalType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect1(self): return self.sect1 - def set_sect1(self, sect1): self.sect1 = sect1 - def add_sect1(self, value): self.sect1.append(value) - def insert_sect1(self, index, value): self.sect1[index] = value - def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.para is not None or - self.sect1 is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect1': - childobj_ = docSect1Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect1', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docInternalType - - -class docInternalS1Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if 
content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docInternalS1Type.subclass: - return docInternalS1Type.subclass(*args_, **kwargs_) - else: - return docInternalS1Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect2(self): return self.sect2 - def set_sect2(self, sect2): self.sect2 = sect2 - def add_sect2(self, value): self.sect2.append(value) - def insert_sect2(self, index, value): self.sect2[index] = value - def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.para is not None or - self.sect2 is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect2': - childobj_ = docSect2Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect2', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docInternalS1Type - - -class docInternalS2Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docInternalS2Type.subclass: - return docInternalS2Type.subclass(*args_, **kwargs_) - else: - return docInternalS2Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect3(self): return self.sect3 - def set_sect3(self, sect3): self.sect3 = sect3 - def add_sect3(self, value): self.sect3.append(value) - def insert_sect3(self, index, value): self.sect3[index] = value - def export(self, outfile, level, namespace_='', 
name_='docInternalS2Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.para is not None or - self.sect3 is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect3': - childobj_ = docSect3Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect3', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docInternalS2Type - - -class docInternalS3Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docInternalS3Type.subclass: - return docInternalS3Type.subclass(*args_, **kwargs_) - else: - return docInternalS3Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect3(self): return self.sect3 - def set_sect3(self, sect3): self.sect3 = sect3 - def add_sect3(self, value): self.sect3.append(value) - def insert_sect3(self, index, value): self.sect3[index] = value - def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.para is not None or - self.sect3 is not None - ): - return True - else: - return False - def build(self, node_): 
- attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect3': - childobj_ = docSect4Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect3', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docInternalS3Type - - -class docInternalS4Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docInternalS4Type.subclass: - return docInternalS4Type.subclass(*args_, **kwargs_) - else: - return docInternalS4Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.para is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docInternalS4Type - - -class docTitleType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - 
self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docTitleType.subclass: - return docTitleType.subclass(*args_, **kwargs_) - else: - return docTitleType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docTitleType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docTitleType - - -class docParaType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docParaType.subclass: - return docParaType.subclass(*args_, **kwargs_) - else: - return docParaType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParaType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docParaType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = 
child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docParaType - - -class docMarkupType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docMarkupType.subclass: - return docMarkupType.subclass(*args_, **kwargs_) - else: - return docMarkupType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docMarkupType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docMarkupType - - -class docURLLink(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): - self.url = url - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docURLLink.subclass: - return docURLLink.subclass(*args_, **kwargs_) - else: - return docURLLink(*args_, **kwargs_) - factory = staticmethod(factory) - def get_url(self): return self.url - def set_url(self, url): self.url = url - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''): - 
showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docURLLink') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'): - if self.url is not None: - outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('url'): - self.url = attrs.get('url').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docURLLink - - -class docAnchorType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docAnchorType.subclass: - return docAnchorType.subclass(*args_, **kwargs_) - else: - return docAnchorType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_id(self): return self.id - def set_id(self, id): self.id = id - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docAnchorType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - 
self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docAnchorType - - -class docFormulaType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docFormulaType.subclass: - return docFormulaType.subclass(*args_, **kwargs_) - else: - return docFormulaType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_id(self): return self.id - def set_id(self, id): self.id = id - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docFormulaType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docFormulaType - - -class docIndexEntryType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, primaryie=None, secondaryie=None): - self.primaryie = primaryie - self.secondaryie = secondaryie - def factory(*args_, **kwargs_): - if docIndexEntryType.subclass: - return docIndexEntryType.subclass(*args_, **kwargs_) - else: - return docIndexEntryType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_primaryie(self): return self.primaryie - def set_primaryie(self, primaryie): self.primaryie = 
primaryie - def get_secondaryie(self): return self.secondaryie - def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie - def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'): - if self.primaryie is not None: - showIndent(outfile, level) - outfile.write('<%sprimaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_)) - if self.secondaryie is not None: - showIndent(outfile, level) - outfile.write('<%ssecondaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_)) - def hasContent_(self): - if ( - self.primaryie is not None or - self.secondaryie is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'primaryie': - primaryie_ = '' - for text__content_ in child_.childNodes: - primaryie_ += text__content_.nodeValue - self.primaryie = primaryie_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'secondaryie': - secondaryie_ = '' - for text__content_ in child_.childNodes: - secondaryie_ += text__content_.nodeValue - self.secondaryie = secondaryie_ -# end class docIndexEntryType - - -class docListType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, listitem=None): - if listitem is None: - self.listitem = [] - else: - self.listitem = listitem - def factory(*args_, **kwargs_): - if docListType.subclass: - return docListType.subclass(*args_, **kwargs_) - else: - return docListType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_listitem(self): return self.listitem - def set_listitem(self, listitem): self.listitem = listitem - def add_listitem(self, value): self.listitem.append(value) - def insert_listitem(self, index, value): self.listitem[index] = value - def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docListType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docListType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docListType'): - for listitem_ in self.listitem: - listitem_.export(outfile, level, namespace_, name_='listitem') - def hasContent_(self): - if ( - 
self.listitem is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'listitem': - obj_ = docListItemType.factory() - obj_.build(child_) - self.listitem.append(obj_) -# end class docListType - - -class docListItemType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None): - if para is None: - self.para = [] - else: - self.para = para - def factory(*args_, **kwargs_): - if docListItemType.subclass: - return docListItemType.subclass(*args_, **kwargs_) - else: - return docListItemType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docListItemType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'): - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - def hasContent_(self): - if ( - self.para is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - self.para.append(obj_) -# end class docListItemType - - -class docSimpleSectType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, title=None, para=None): - self.kind = kind - self.title = title - if para is None: - self.para = [] - else: - self.para = para - def factory(*args_, **kwargs_): - if docSimpleSectType.subclass: - return docSimpleSectType.subclass(*args_, **kwargs_) - else: - return docSimpleSectType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType') - if 
self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'): - if self.kind is not None: - outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'): - if self.title: - self.title.export(outfile, level, namespace_, name_='title') - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - def hasContent_(self): - if ( - self.title is not None or - self.para is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = attrs.get('kind').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - obj_ = docTitleType.factory() - obj_.build(child_) - self.set_title(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - self.para.append(obj_) -# end class docSimpleSectType - - -class docVarListEntryType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, term=None): - self.term = term - def factory(*args_, **kwargs_): - if docVarListEntryType.subclass: - return docVarListEntryType.subclass(*args_, **kwargs_) - else: - return docVarListEntryType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_term(self): return self.term - def set_term(self, term): self.term = term - def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'): - if self.term: - self.term.export(outfile, level, namespace_, name_='term', ) - def hasContent_(self): - if ( - self.term is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'term': - obj_ = docTitleType.factory() - obj_.build(child_) - self.set_term(obj_) -# end class docVarListEntryType - - -class docVariableListType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if docVariableListType.subclass: - return docVariableListType.subclass(*args_, **kwargs_) - else: - return docVariableListType(*args_, **kwargs_) - factory = 
staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docVariableListType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docVariableListType - - -class docRefTextType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): - self.refid = refid - self.kindref = kindref - self.external = external - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docRefTextType.subclass: - return docRefTextType.subclass(*args_, **kwargs_) - else: - return docRefTextType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def get_kindref(self): return self.kindref - def set_kindref(self, kindref): self.kindref = kindref - def get_external(self): return self.external - def set_external(self, external): self.external = external - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docRefTextType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'): - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - if self.kindref is not None: - outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) - if self.external is not None: - outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), 
input_name='external'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('refid'): - self.refid = attrs.get('refid').value - if attrs.get('kindref'): - self.kindref = attrs.get('kindref').value - if attrs.get('external'): - self.external = attrs.get('external').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docRefTextType - - -class docTableType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, rows=None, cols=None, row=None, caption=None): - self.rows = rows - self.cols = cols - if row is None: - self.row = [] - else: - self.row = row - self.caption = caption - def factory(*args_, **kwargs_): - if docTableType.subclass: - return docTableType.subclass(*args_, **kwargs_) - else: - return docTableType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_row(self): return self.row - def set_row(self, row): self.row = row - def add_row(self, value): self.row.append(value) - def insert_row(self, index, value): self.row[index] = value - def get_caption(self): return self.caption - def set_caption(self, caption): self.caption = caption - def get_rows(self): return self.rows - def set_rows(self, rows): self.rows = rows - def get_cols(self): return self.cols - def set_cols(self, cols): self.cols = cols - def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docTableType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'): - if self.rows is not None: - outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows')) - if self.cols is not None: - outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols')) - def exportChildren(self, outfile, level, namespace_='', name_='docTableType'): - for row_ in self.row: - row_.export(outfile, level, namespace_, name_='row') - if self.caption: - self.caption.export(outfile, level, namespace_, name_='caption') - def hasContent_(self): - if ( - self.row is not None or - self.caption is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - 
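All of the generated doc* types being removed in this hunk share the same minidom traversal: build() reads the element's attributes, then walks childNodes and dispatches on each child's local tag name in buildChildren(). The following is a minimal, hypothetical sketch of that pattern (ParaSketch is not a Breathe class, just an illustration):

# A minimal sketch (hypothetical ParaSketch, not a Breathe class) of the
# traversal pattern: read attributes, then dispatch on each child's tag name.
from xml.dom import Node, minidom

class ParaSketch:
    def __init__(self):
        self.text = ''

    def build(self, node_):
        for child_ in node_.childNodes:
            name_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, name_)

    def buildChildren(self, child_, name_):
        # The real generated classes branch on ELEMENT_NODE names here and
        # recurse into freshly factory()-built child objects.
        if child_.nodeType == Node.TEXT_NODE:
            self.text += child_.nodeValue

doc = minidom.parseString('<para>plain <bold>text</bold> tail</para>')
p = ParaSketch()
p.build(doc.documentElement)
print(repr(p.text))  # 'plain  tail' -- only the text nodes seen at this level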
self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('rows'): - try: - self.rows = int(attrs.get('rows').value) - except ValueError as exp: - raise ValueError('Bad integer attribute (rows): %s' % exp) - if attrs.get('cols'): - try: - self.cols = int(attrs.get('cols').value) - except ValueError as exp: - raise ValueError('Bad integer attribute (cols): %s' % exp) - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'row': - obj_ = docRowType.factory() - obj_.build(child_) - self.row.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'caption': - obj_ = docCaptionType.factory() - obj_.build(child_) - self.set_caption(obj_) -# end class docTableType - - -class docRowType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, entry=None): - if entry is None: - self.entry = [] - else: - self.entry = entry - def factory(*args_, **kwargs_): - if docRowType.subclass: - return docRowType.subclass(*args_, **kwargs_) - else: - return docRowType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_entry(self): return self.entry - def set_entry(self, entry): self.entry = entry - def add_entry(self, value): self.entry.append(value) - def insert_entry(self, index, value): self.entry[index] = value - def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docRowType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docRowType'): - for entry_ in self.entry: - entry_.export(outfile, level, namespace_, name_='entry') - def hasContent_(self): - if ( - self.entry is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'entry': - obj_ = docEntryType.factory() - obj_.build(child_) - self.entry.append(obj_) -# end class docRowType - - -class docEntryType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, thead=None, align=None, rowspan=None, colspan=None, para=None): - self.thead = thead - self.align = align - self.rowspan = rowspan - self.colspan = colspan - if para is None: - self.para = [] - else: - self.para = para - def factory(*args_, **kwargs_): - if docEntryType.subclass: - return docEntryType.subclass(*args_, **kwargs_) - else: - return docEntryType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_thead(self): return self.thead - def set_thead(self, thead): self.thead = thead - def get_align(self): return self.align - def set_align(self, align): self.align = align - def 
get_rowspan(self): return self.rowspan - def set_rowspan(self, rowspan): self.rowspan = rowspan - def get_colspan(self): return self.colspan - def set_colspan(self, colspan): self.colspan = colspan - def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docEntryType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'): - if self.thead is not None: - outfile.write(' thead=%s' % (quote_attrib(self.thead), )) - if self.align is not None: - outfile.write(' align=%s' % (quote_attrib(self.align), )) - if self.rowspan is not None: - outfile.write(' rowspan=%s' % (quote_attrib(self.rowspan), )) - if self.colspan is not None: - outfile.write(' colspan=%s' % (quote_attrib(self.colspan), )) - def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'): - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - def hasContent_(self): - if ( - self.para is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('thead'): - self.thead = attrs.get('thead').value - if attrs.get('align'): - self.align = attrs.get('align').value - if attrs.get('rowspan'): - self.rowspan = attrs.get('rowspan').value - if attrs.get('colspan'): - self.colspan = attrs.get('colspan').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - self.para.append(obj_) -# end class docEntryType - - -class docCaptionType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docCaptionType.subclass: - return docCaptionType.subclass(*args_, **kwargs_) - else: - return docCaptionType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docCaptionType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - 
self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docCaptionType - - -class docHeadingType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): - self.level = level - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docHeadingType.subclass: - return docHeadingType.subclass(*args_, **kwargs_) - else: - return docHeadingType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_level(self): return self.level - def set_level(self, level): self.level = level - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docHeadingType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'): - if self.level is not None: - outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level')) - def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('level'): - try: - self.level = int(attrs.get('level').value) - except ValueError as exp: - raise ValueError('Bad integer attribute (level): %s' % exp) - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docHeadingType - - -class docImageType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): - self.width = 
width - self.type_ = type_ - self.name = name - self.height = height - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docImageType.subclass: - return docImageType.subclass(*args_, **kwargs_) - else: - return docImageType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_width(self): return self.width - def set_width(self, width): self.width = width - def get_type(self): return self.type_ - def set_type(self, type_): self.type_ = type_ - def get_name(self): return self.name - def set_name(self, name): self.name = name - def get_height(self): return self.height - def set_height(self, height): self.height = height - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docImageType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'): - if self.width is not None: - outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), )) - if self.type_ is not None: - outfile.write(' type=%s' % (quote_attrib(self.type_), )) - if self.name is not None: - outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - if self.height is not None: - outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docImageType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('width'): - self.width = attrs.get('width').value - if attrs.get('type'): - self.type_ = attrs.get('type').value - if attrs.get('name'): - self.name = attrs.get('name').value - if attrs.get('height'): - self.height = attrs.get('height').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docImageType - - -class docDotFileType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): - self.name = name - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: 
- self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docDotFileType.subclass: - return docDotFileType.subclass(*args_, **kwargs_) - else: - return docDotFileType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_name(self): return self.name - def set_name(self, name): self.name = name - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docDotFileType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'): - if self.name is not None: - outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('name'): - self.name = attrs.get('name').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docDotFileType - - -class docDotType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, caption=None, valueOf_='', mixedclass_=None, content_=None): - self.caption = caption - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docDotType.subclass: - return docDotType.subclass(*args_, **kwargs_) - else: - return docDotType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_name(self): return self.caption - def set_name(self, caption): self.caption = caption - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docDotType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docDotType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docDotType'): - if self.caption is not None: - outfile.write(' caption=%s' % 
(self.format_string(quote_attrib(self.caption).encode(ExternalEncoding), input_name='caption'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docDotType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('caption'): - self.caption = attrs.get('caption').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docDotType - - -class docTocItemType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docTocItemType.subclass: - return docTocItemType.subclass(*args_, **kwargs_) - else: - return docTocItemType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_id(self): return self.id - def set_id(self, id): self.id = id - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docTocItemType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += 
child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docTocItemType - - -class docTocListType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, tocitem=None): - if tocitem is None: - self.tocitem = [] - else: - self.tocitem = tocitem - def factory(*args_, **kwargs_): - if docTocListType.subclass: - return docTocListType.subclass(*args_, **kwargs_) - else: - return docTocListType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_tocitem(self): return self.tocitem - def set_tocitem(self, tocitem): self.tocitem = tocitem - def add_tocitem(self, value): self.tocitem.append(value) - def insert_tocitem(self, index, value): self.tocitem[index] = value - def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docTocListType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'): - for tocitem_ in self.tocitem: - tocitem_.export(outfile, level, namespace_, name_='tocitem') - def hasContent_(self): - if ( - self.tocitem is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'tocitem': - obj_ = docTocItemType.factory() - obj_.build(child_) - self.tocitem.append(obj_) -# end class docTocListType - - -class docLanguageType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, langid=None, para=None): - self.langid = langid - if para is None: - self.para = [] - else: - self.para = para - def factory(*args_, **kwargs_): - if docLanguageType.subclass: - return docLanguageType.subclass(*args_, **kwargs_) - else: - return docLanguageType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_langid(self): return self.langid - def set_langid(self, langid): self.langid = langid - def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docLanguageType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'): - if self.langid is not None: - outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), )) - def 
exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'): - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - def hasContent_(self): - if ( - self.para is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('langid'): - self.langid = attrs.get('langid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - self.para.append(obj_) -# end class docLanguageType - - -class docParamListType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, parameteritem=None): - self.kind = kind - if parameteritem is None: - self.parameteritem = [] - else: - self.parameteritem = parameteritem - def factory(*args_, **kwargs_): - if docParamListType.subclass: - return docParamListType.subclass(*args_, **kwargs_) - else: - return docParamListType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_parameteritem(self): return self.parameteritem - def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem - def add_parameteritem(self, value): self.parameteritem.append(value) - def insert_parameteritem(self, index, value): self.parameteritem[index] = value - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParamListType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'): - if self.kind is not None: - outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'): - for parameteritem_ in self.parameteritem: - parameteritem_.export(outfile, level, namespace_, name_='parameteritem') - def hasContent_(self): - if ( - self.parameteritem is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = attrs.get('kind').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parameteritem': - obj_ = docParamListItem.factory() - obj_.build(child_) - self.parameteritem.append(obj_) -# end class docParamListType - - -class docParamListItem(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, parameternamelist=None, parameterdescription=None): - if parameternamelist is None: - self.parameternamelist = [] - else: - self.parameternamelist = parameternamelist - self.parameterdescription = parameterdescription - def factory(*args_, **kwargs_): - if docParamListItem.subclass: - return 
docParamListItem.subclass(*args_, **kwargs_) - else: - return docParamListItem(*args_, **kwargs_) - factory = staticmethod(factory) - def get_parameternamelist(self): return self.parameternamelist - def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist - def add_parameternamelist(self, value): self.parameternamelist.append(value) - def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value - def get_parameterdescription(self): return self.parameterdescription - def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription - def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParamListItem') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'): - for parameternamelist_ in self.parameternamelist: - parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist') - if self.parameterdescription: - self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', ) - def hasContent_(self): - if ( - self.parameternamelist is not None or - self.parameterdescription is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parameternamelist': - obj_ = docParamNameList.factory() - obj_.build(child_) - self.parameternamelist.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parameterdescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_parameterdescription(obj_) -# end class docParamListItem - - -class docParamNameList(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, parametername=None): - if parametername is None: - self.parametername = [] - else: - self.parametername = parametername - def factory(*args_, **kwargs_): - if docParamNameList.subclass: - return docParamNameList.subclass(*args_, **kwargs_) - else: - return docParamNameList(*args_, **kwargs_) - factory = staticmethod(factory) - def get_parametername(self): return self.parametername - def set_parametername(self, parametername): self.parametername = parametername - def add_parametername(self, value): self.parametername.append(value) - def insert_parametername(self, index, value): self.parametername[index] = value - def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParamNameList') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - 
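The parameter-list types in this hunk mirror Doxygen's nesting of parameterlist, parameteritem, parameternamelist/parametername, and parameterdescription. The standalone sketch below uses plain minidom and made-up sample XML to show the shape these classes parse; it does not call the generated classes themselves:

# Sketch only: the XML shape modelled by docParamListType and friends,
# walked here with plain minidom on hypothetical sample input.
from xml.dom import minidom

xml = """<parameterlist kind="param">
  <parameteritem>
    <parameternamelist><parametername>count</parametername></parameternamelist>
    <parameterdescription><para>Number of items.</para></parameterdescription>
  </parameteritem>
</parameterlist>"""

doc = minidom.parseString(xml)
for item in doc.getElementsByTagName('parameteritem'):
    names = [n.firstChild.nodeValue
             for n in item.getElementsByTagName('parametername')]
    desc = item.getElementsByTagName('para')[0].firstChild.nodeValue
    print(', '.join(names), '--', desc)   # count -- Number of items.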
outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'): - for parametername_ in self.parametername: - parametername_.export(outfile, level, namespace_, name_='parametername') - def hasContent_(self): - if ( - self.parametername is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parametername': - obj_ = docParamName.factory() - obj_.build(child_) - self.parametername.append(obj_) -# end class docParamNameList - - -class docParamName(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): - self.direction = direction - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docParamName.subclass: - return docParamName.subclass(*args_, **kwargs_) - else: - return docParamName(*args_, **kwargs_) - factory = staticmethod(factory) - def get_ref(self): return self.ref - def set_ref(self, ref): self.ref = ref - def get_direction(self): return self.direction - def set_direction(self, direction): self.direction = direction - def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParamName') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'): - if self.direction is not None: - outfile.write(' direction=%s' % (quote_attrib(self.direction), )) - def exportChildren(self, outfile, level, namespace_='', name_='docParamName'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.ref is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('direction'): - self.direction = attrs.get('direction').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'ref': - childobj_ = docRefTextType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'ref', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - d = child_.parentNode.attributes.get('direction') - if d is not None: - self.content_.insert(0, self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, - '', '[{}] 
'.format(d.value))) -# end class docParamName - - -class docXRefSectType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, xreftitle=None, xrefdescription=None): - self.id = id - if xreftitle is None: - self.xreftitle = [] - else: - self.xreftitle = xreftitle - self.xrefdescription = xrefdescription - def factory(*args_, **kwargs_): - if docXRefSectType.subclass: - return docXRefSectType.subclass(*args_, **kwargs_) - else: - return docXRefSectType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_xreftitle(self): return self.xreftitle - def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle - def add_xreftitle(self, value): self.xreftitle.append(value) - def insert_xreftitle(self, index, value): self.xreftitle[index] = value - def get_xrefdescription(self): return self.xrefdescription - def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'): - for xreftitle_ in self.xreftitle: - showIndent(outfile, level) - outfile.write('<%sxreftitle>%s\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_)) - if self.xrefdescription: - self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', ) - def hasContent_(self): - if ( - self.xreftitle is not None or - self.xrefdescription is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'xreftitle': - xreftitle_ = '' - for text__content_ in child_.childNodes: - xreftitle_ += text__content_.nodeValue - self.xreftitle.append(xreftitle_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'xrefdescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_xrefdescription(obj_) -# end class docXRefSectType - - -class docCopyType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, link=None, para=None, sect1=None, internal=None): - self.link = link - if para is None: - self.para = [] - else: - self.para = para - if sect1 is None: - self.sect1 = [] - else: - self.sect1 = sect1 - self.internal = internal - def factory(*args_, **kwargs_): - if docCopyType.subclass: - return docCopyType.subclass(*args_, **kwargs_) - else: - return docCopyType(*args_, **kwargs_) - factory = 
staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect1(self): return self.sect1 - def set_sect1(self, sect1): self.sect1 = sect1 - def add_sect1(self, value): self.sect1.append(value) - def insert_sect1(self, index, value): self.sect1[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def get_link(self): return self.link - def set_link(self, link): self.link = link - def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docCopyType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'): - if self.link is not None: - outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'): - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - for sect1_ in self.sect1: - sect1_.export(outfile, level, namespace_, name_='sect1') - if self.internal: - self.internal.export(outfile, level, namespace_, name_='internal') - def hasContent_(self): - if ( - self.para is not None or - self.sect1 is not None or - self.internal is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('link'): - self.link = attrs.get('link').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - self.para.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect1': - obj_ = docSect1Type.factory() - obj_.build(child_) - self.sect1.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - obj_ = docInternalType.factory() - obj_.build(child_) - self.set_internal(obj_) -# end class docCopyType - - -class docCharType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, char=None, valueOf_=''): - self.char = char - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if docCharType.subclass: - return docCharType.subclass(*args_, **kwargs_) - else: - return docCharType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_char(self): return self.char - def set_char(self, char): self.char = char - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docCharType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, 
level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'): - if self.char is not None: - outfile.write(' char=%s' % (quote_attrib(self.char), )) - def exportChildren(self, outfile, level, namespace_='', name_='docCharType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('char'): - self.char = attrs.get('char').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docCharType - - -class docBlockQuoteType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, mixedclass_=None, para=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if para is None: - self.para = [] - else: - self.para = para - def factory(*args_, **kwargs_): - if docBlockQuoteType.subclass: - return docBlockQuoteType.subclass(*args_, **kwargs_) - else: - return docBlockQuoteType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def export(self, outfile, level, namespace_='', name_='docBlockQuoteType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - self.exportAttributes(outfile, level, namespace_, name_='docBlockQuoteType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write('/>\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docBlockQuoteType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docBlockQuoteType'): - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - def hasContent_(self): - if ( - self.para - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', obj_) - self.para.append(obj_) -# end class docBlockQuoteType - - -class docParBlockType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, mixedclass_=None, para=None): - 
if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if para is None: - self.para = [] - else: - self.para = para - def factory(*args_, **kwargs_): - if docParBlockType.subclass: - return docParBlockType.subclass(*args_, **kwargs_) - else: - return docParBlockType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def export(self, outfile, level, namespace_='', name_='docParBlockType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - self.exportAttributes(outfile, level, namespace_, name_='docParBlockType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write('/>\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docParBlockType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docParBlockType'): - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - def hasContent_(self): - if ( - self.para - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', obj_) - self.para.append(obj_) -# end class docParBlockType - - -class docEmptyType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if docEmptyType.subclass: - return docEmptyType.subclass(*args_, **kwargs_) - else: - return docEmptyType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docEmptyType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = 
child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docEmptyType - - -USAGE_TEXT = """ -Usage: python .py [ -s ] -Options: - -s Use the SAX parser, not the minidom parser. -""" - -def usage(): - print(USAGE_TEXT) - sys.exit(1) - - -def parse(inFileName): - doc = minidom.parse(inFileName) - rootNode = doc.documentElement - rootObj = DoxygenType.factory() - rootObj.build(rootNode) - # Enable Python to collect the space used by the DOM. - doc = None - sys.stdout.write('\n') - rootObj.export(sys.stdout, 0, name_="doxygen", - namespacedef_='') - return rootObj - - -def parseString(inString): - doc = minidom.parseString(inString) - rootNode = doc.documentElement - rootObj = DoxygenType.factory() - rootObj.build(rootNode) - # Enable Python to collect the space used by the DOM. - doc = None - sys.stdout.write('\n') - rootObj.export(sys.stdout, 0, name_="doxygen", - namespacedef_='') - return rootObj - - -def parseLiteral(inFileName): - doc = minidom.parse(inFileName) - rootNode = doc.documentElement - rootObj = DoxygenType.factory() - rootObj.build(rootNode) - # Enable Python to collect the space used by the DOM. - doc = None - sys.stdout.write('from compound import *\n\n') - sys.stdout.write('rootObj = doxygen(\n') - rootObj.exportLiteral(sys.stdout, 0, name_="doxygen") - sys.stdout.write(')\n') - return rootObj - - -def main(): - args = sys.argv[1:] - if len(args) == 1: - parse(args[0]) - else: - usage() - - -if __name__ == '__main__': - main() - #import pdb - #pdb.run('main()') diff --git a/breathe/parser/index.py b/breathe/parser/index.py deleted file mode 100644 index 3788991e..00000000 --- a/breathe/parser/index.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Generated Mon Feb 9 19:08:05 2009 by generateDS.py. -""" - -from xml.dom import minidom -from xml.parsers.expat import ExpatError - -from . 
import indexsuper as supermod - - -class DoxygenTypeSub(supermod.DoxygenType): - - node_type = "doxygen" - - def __init__(self, version=None, compound=None): - supermod.DoxygenType.__init__(self, version, compound) -supermod.DoxygenType.subclass = DoxygenTypeSub -# end class DoxygenTypeSub - - -class CompoundTypeSub(supermod.CompoundType): - - node_type = "compound" - - def __init__(self, kind=None, refid=None, name='', member=None): - supermod.CompoundType.__init__(self, kind, refid, name, member) -supermod.CompoundType.subclass = CompoundTypeSub -# end class CompoundTypeSub - - -class MemberTypeSub(supermod.MemberType): - - node_type = "member" - - def __init__(self, kind=None, refid=None, name=''): - supermod.MemberType.__init__(self, kind, refid, name) -supermod.MemberType.subclass = MemberTypeSub -# end class MemberTypeSub - - -class ParseError(Exception): - pass - - -class FileIOError(Exception): - pass - - -def parse(inFilename): - try: - doc = minidom.parse(inFilename) - except IOError as e: - raise FileIOError(e) - except ExpatError as e: - raise ParseError(e) - - rootNode = doc.documentElement - rootObj = supermod.DoxygenType.factory() - rootObj.build(rootNode) - return rootObj diff --git a/breathe/parser/indexsuper.py b/breathe/parser/indexsuper.py deleted file mode 100644 index 4c7126ce..00000000 --- a/breathe/parser/indexsuper.py +++ /dev/null @@ -1,359 +0,0 @@ -# -# Generated Thu Jun 11 18:43:54 2009 by generateDS.py. -# - -import sys -import getopt -from xml.dom import minidom -from xml.dom import Node - -# -# User methods -# -# Calls to the methods in these classes are generated by generateDS.py. -# You can replace these methods by re-implementing the following class -# in a module named generatedssuper.py. - -try: - from generatedssuper import GeneratedsSuper -except ImportError as exp: - - class GeneratedsSuper: - def format_string(self, input_data, input_name=''): - return input_data - def format_integer(self, input_data, input_name=''): - return '%d' % input_data - def format_float(self, input_data, input_name=''): - return '%f' % input_data - def format_double(self, input_data, input_name=''): - return '%e' % input_data - def format_boolean(self, input_data, input_name=''): - return '%s' % input_data - - -# -# If you have installed IPython you can uncomment and use the following. -# IPython is available from http://ipython.scipy.org/. -# - -## from IPython.Shell import IPShellEmbed -## args = '' -## ipshell = IPShellEmbed(args, -## banner = 'Dropping into IPython', -## exit_msg = 'Leaving Interpreter, back to program.') - -# Then use the following line where and when you want to drop into the -# IPython shell: -# ipshell(' -- Entering ipshell.\nHit Ctrl-D to exit') - -# -# Globals -# - -ExternalEncoding = 'ascii' - -# -# Support/utility functions. 
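The deleted index.py relied on the generateDS "subclass hook": it assigned supermod.DoxygenType.subclass = DoxygenTypeSub (and likewise for CompoundType and MemberType), and every generated factory() checks that attribute first, so parsing yields the hand-written subclasses. A minimal sketch of the hook, with hypothetical class names:

# Minimal sketch of the subclass hook (hypothetical names): the generated
# base builds instances through factory(), which defers to cls.subclass
# once a hand-written override has registered itself.
class GeneratedBase:
    subclass = None

    def factory(*args_, **kwargs_):
        if GeneratedBase.subclass:
            return GeneratedBase.subclass(*args_, **kwargs_)
        else:
            return GeneratedBase(*args_, **kwargs_)
    factory = staticmethod(factory)

class HandWrittenSub(GeneratedBase):
    node_type = "doxygen"

GeneratedBase.subclass = HandWrittenSub

obj = GeneratedBase.factory()
print(type(obj).__name__)   # HandWrittenSub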
-#
-
-def showIndent(outfile, level):
-    for idx in range(level):
-        outfile.write('    ')
-
-def quote_xml(inStr):
-    s1 = (isinstance(inStr, basestring) and inStr or
-          '%s' % inStr)
-    s1 = s1.replace('&', '&amp;')
-    s1 = s1.replace('<', '&lt;')
-    s1 = s1.replace('>', '&gt;')
-    return s1
-
-def quote_attrib(inStr):
-    s1 = (isinstance(inStr, basestring) and inStr or
-          '%s' % inStr)
-    s1 = s1.replace('&', '&amp;')
-    s1 = s1.replace('<', '&lt;')
-    s1 = s1.replace('>', '&gt;')
-    if '"' in s1:
-        if "'" in s1:
-            s1 = '"%s"' % s1.replace('"', "&quot;")
-        else:
-            s1 = "'%s'" % s1
-    else:
-        s1 = '"%s"' % s1
-    return s1
-
-def quote_python(inStr):
-    s1 = inStr
-    if s1.find("'") == -1:
-        if s1.find('\n') == -1:
-            return "'%s'" % s1
-        else:
-            return "'''%s'''" % s1
-    else:
-        if s1.find('"') != -1:
-            s1 = s1.replace('"', '\\"')
-        if s1.find('\n') == -1:
-            return '"%s"' % s1
-        else:
-            return '"""%s"""' % s1
-
-
-class MixedContainer:
-    # Constants for category:
-    CategoryNone = 0
-    CategoryText = 1
-    CategorySimple = 2
-    CategoryComplex = 3
-    # Constants for content_type:
-    TypeNone = 0
-    TypeText = 1
-    TypeString = 2
-    TypeInteger = 3
-    TypeFloat = 4
-    TypeDecimal = 5
-    TypeDouble = 6
-    TypeBoolean = 7
-    def __init__(self, category, content_type, name, value):
-        self.category = category
-        self.content_type = content_type
-        self.name = name
-        self.value = value
-    def getCategory(self):
-        return self.category
-    def getContenttype(self, content_type):
-        return self.content_type
-    def getValue(self):
-        return self.value
-    def getName(self):
-        return self.name
-
-
-class _MemberSpec(object):
-    def __init__(self, name='', data_type='', container=0):
-        self.name = name
-        self.data_type = data_type
-        self.container = container
-    def set_name(self, name): self.name = name
-    def get_name(self): return self.name
-    def set_data_type(self, data_type): self.data_type = data_type
-    def get_data_type(self): return self.data_type
-    def set_container(self, container): self.container = container
-    def get_container(self): return self.container
-
-
-#
-# Data representation classes.
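# Illustrative sketch, not from the original patch: the escaping helpers above
# are Python-2-era generateDS output (note `basestring`), so with
# `basestring = str` in scope on Python 3 they behave like this; the example
# inputs are made up.
basestring = str
print(quote_xml('a < b & c'))      # a &lt; b &amp; c
print(quote_attrib('say "hi"'))    # 'say "hi"'  (single-quoted: the input has no apostrophe)
print(quote_attrib('it\'s "x"'))   # "it's &quot;x&quot;"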
-# - -class DoxygenType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, version=None, compound=None): - self.version = version - if compound is None: - self.compound = [] - else: - self.compound = compound - def factory(*args_, **kwargs_): - if DoxygenType.subclass: - return DoxygenType.subclass(*args_, **kwargs_) - else: - return DoxygenType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_compound(self): return self.compound - def set_compound(self, compound): self.compound = compound - def add_compound(self, value): self.compound.append(value) - def insert_compound(self, index, value): self.compound[index] = value - def get_version(self): return self.version - def set_version(self, version): self.version = version - def hasContent_(self): - if ( - self.compound is not None - ): - return True - else: - return False - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('version'): - self.version = attrs.get('version').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'compound': - obj_ = CompoundType.factory() - obj_.build(child_) - self.compound.append(obj_) -# end class DoxygenType - - -class CompoundType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, refid=None, name=None, member=None): - self.kind = kind - self.refid = refid - self.name = name - if member is None: - self.member = [] - else: - self.member = member - def factory(*args_, **kwargs_): - if CompoundType.subclass: - return CompoundType.subclass(*args_, **kwargs_) - else: - return CompoundType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_name(self): return self.name - def set_name(self, name): self.name = name - def get_member(self): return self.member - def set_member(self, member): self.member = member - def add_member(self, value): self.member.append(value) - def insert_member(self, index, value): self.member[index] = value - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = attrs.get('kind').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': - name_ = '' - for text__content_ in child_.childNodes: - name_ += text__content_.nodeValue - self.name = name_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'member': - obj_ = MemberType.factory() - obj_.build(child_) - self.member.append(obj_) -# end class CompoundType - - -class MemberType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, refid=None, name=None): - self.kind = kind - self.refid = refid - self.name = name - def factory(*args_, **kwargs_): - if MemberType.subclass: - return MemberType.subclass(*args_, **kwargs_) - else: - return MemberType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_name(self): return 
self.name
-    def set_name(self, name): self.name = name
-    def get_kind(self): return self.kind
-    def set_kind(self, kind): self.kind = kind
-    def get_refid(self): return self.refid
-    def set_refid(self, refid): self.refid = refid
-    def hasContent_(self):
-        if (
-            self.name is not None
-            ):
-            return True
-        else:
-            return False
-    def build(self, node_):
-        attrs = node_.attributes
-        self.buildAttributes(attrs)
-        for child_ in node_.childNodes:
-            nodeName_ = child_.nodeName.split(':')[-1]
-            self.buildChildren(child_, nodeName_)
-    def buildAttributes(self, attrs):
-        if attrs.get('kind'):
-            self.kind = attrs.get('kind').value
-        if attrs.get('refid'):
-            self.refid = attrs.get('refid').value
-    def buildChildren(self, child_, nodeName_):
-        if child_.nodeType == Node.ELEMENT_NODE and \
-            nodeName_ == 'name':
-            name_ = ''
-            for text__content_ in child_.childNodes:
-                name_ += text__content_.nodeValue
-            self.name = name_
-# end class MemberType
-
-
-USAGE_TEXT = """
-Usage: python <Parser>.py [ -s ] <in_xml_file>
-Options:
-    -s        Use the SAX parser, not the minidom parser.
-"""
-
-def usage():
-    print(USAGE_TEXT)
-    sys.exit(1)
-
-
-def parse(inFileName):
-    doc = minidom.parse(inFileName)
-    rootNode = doc.documentElement
-    rootObj = DoxygenType.factory()
-    rootObj.build(rootNode)
-    # Enable Python to collect the space used by the DOM.
-    doc = None
-    sys.stdout.write('<?xml version="1.0" ?>\n')
-    rootObj.export(sys.stdout, 0, name_="doxygenindex",
-        namespacedef_='')
-    return rootObj
-
-
-def parseString(inString):
-    doc = minidom.parseString(inString)
-    rootNode = doc.documentElement
-    rootObj = DoxygenType.factory()
-    rootObj.build(rootNode)
-    # Enable Python to collect the space used by the DOM.
-    doc = None
-    sys.stdout.write('<?xml version="1.0" ?>\n')
-    rootObj.export(sys.stdout, 0, name_="doxygenindex",
-        namespacedef_='')
-    return rootObj
-
-
-def parseLiteral(inFileName):
-    doc = minidom.parse(inFileName)
-    rootNode = doc.documentElement
-    rootObj = DoxygenType.factory()
-    rootObj.build(rootNode)
-    # Enable Python to collect the space used by the DOM.
- doc = None - sys.stdout.write('from index import *\n\n') - sys.stdout.write('rootObj = doxygenindex(\n') - rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex") - sys.stdout.write(')\n') - return rootObj - - -def main(): - args = sys.argv[1:] - if len(args) == 1: - parse(args[0]) - else: - usage() - - - - -if __name__ == '__main__': - main() - #import pdb - #pdb.run('main()') diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index dff0c982..39c118c0 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -1,7 +1,7 @@ import os import sphinx -from breathe.parser import compound, compoundsuper, DoxygenCompoundParser +from breathe import parser from breathe.project import ProjectInfo from breathe.renderer import RenderContext from breathe.renderer.filter import Filter @@ -493,7 +493,7 @@ def __init__( state, document: nodes.document, target_handler: TargetHandler, - compound_parser: DoxygenCompoundParser, + compound_parser: parser.DoxygenCompoundParser, filter_: Filter, ): self.app = app diff --git a/pyproject.toml b/pyproject.toml index 15f421bb..9880c562 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,6 @@ +[build-system] +requires = ["setuptools", "jinja2", "perfect-hash"] + [tool.black] line-length = 100 extend-exclude = ''' diff --git a/requirements/development.txt b/requirements/development.txt index 35b1c56d..f2fd457a 100644 --- a/requirements/development.txt +++ b/requirements/development.txt @@ -11,4 +11,6 @@ types-Pygments black==22.3.0 sphinx-copybutton -furo \ No newline at end of file +furo + +perfect-hash diff --git a/setup.cfg b/setup.cfg index dbdc61b9..88eee913 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [metadata] -license_file = LICENSE +license_files = LICENSE [flake8] max-line-length = 100 diff --git a/setup.py b/setup.py index eed8907d..0b6a187c 100644 --- a/setup.py +++ b/setup.py @@ -1,13 +1,24 @@ # -*- coding: utf-8 -*- try: - from setuptools import setup, find_packages + from setuptools import setup, find_packages, Extension except ImportError: import distribute_setup distribute_setup.use_setuptools() - from setuptools import setup, find_packages + from setuptools import setup, find_packages, Extension import sys +import os.path +from setuptools.command.build_ext import build_ext +from distutils import log +from distutils.dep_util import newer_group +from distutils.dir_util import mkpath + +# add xml_parser_generator to the import path list +base_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0,os.path.join(base_dir,'xml_parser_generator')) + +import make_parser # Keep in sync with breathe/__init__.py __version__ __version__ = "4.35.0" @@ -24,6 +35,58 @@ sys.exit(1) +class CustomBuildExt(build_ext): + """Extend build_ext to automatically generate parser.c""" + + SCHEMA_FILE = os.path.join('xml_parser_generator','schema.json') + MODULE_TEMPLATE = os.path.join('xml_parser_generator','module_template.c') + STUBS_TEMPLATE = os.path.join('xml_parser_generator','stubs_template.pyi') + MAKER_SOURCE = os.path.join('xml_parser_generator','make_parser.py') + + DEPENDENCIES = [SCHEMA_FILE,MODULE_TEMPLATE,STUBS_TEMPLATE,MAKER_SOURCE] + + def build_extensions(self): + assert len(self.extensions) == 1 + + if not self.debug: + # The parser doesn't do any complicated calculation; its speed will + # mostly depend on file read and memory allocation speed. Thus it's + # better to optimize for size. 
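# Illustrative sketch, not from the original patch: the generation step that
# CustomBuildExt automates can also be run by hand to inspect the generated
# sources. The output paths below are made up; generate_from_json is defined in
# xml_parser_generator/make_parser.py, which this patch adds.
import sys, os.path
sys.path.insert(0, 'xml_parser_generator')
import make_parser
make_parser.generate_from_json(
    os.path.join('xml_parser_generator', 'schema.json'),
    os.path.join('xml_parser_generator', 'module_template.c'),
    os.path.join('xml_parser_generator', 'stubs_template.pyi'),
    'parser.c',    # hypothetical C output location
    'parser.pyi')  # hypothetical stub output location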
+ c = self.compiler.compiler_type + if c == 'msvc': + self.extensions[0].extra_compile_args = ['/O1'] + elif c in {'unix','cygwin','mingw32'}: + self.extensions[0].extra_compile_args = ['-Os'] + self.extensions[0].extra_link_args = ['-s'] + + source = os.path.join(self.build_temp,self.extensions[0].name+'.c') + + # put the stub file in the same place that the extension module will be + ext_dest = self.get_ext_fullpath(self.extensions[0].name) + libdir = os.path.dirname(ext_dest) + stub = os.path.join(libdir,self.extensions[0].name+'.pyi') + + mkpath(self.build_temp,dry_run=self.dry_run) + mkpath(libdir,dry_run=self.dry_run) + + if (self.force + or newer_group(self.DEPENDENCIES,source) + or newer_group(self.DEPENDENCIES,stub)): + log.info(f'generating "{source}" and "{stub}" from templates') + if not self.dry_run: + make_parser.generate_from_json( + self.SCHEMA_FILE, + self.MODULE_TEMPLATE, + self.STUBS_TEMPLATE, + source, + stub) + else: + log.debug(f'"{source}" and "{stub}" are up-to-date') + + self.extensions[0].sources.append(source) + + super().build_extensions() + setup( name="breathe", version=__version__, @@ -51,6 +114,21 @@ ], platforms="any", packages=find_packages(), + ext_package='breathe', + ext_modules=[ + Extension( + '_parser', + [], # source is generated by CustomBuildExt + depends=CustomBuildExt.DEPENDENCIES, + libraries=['expat'], + define_macros=[ + ('PARSER_PY_LIMITED_API','0x03070000'), # set Stable ABI version to 3.7 + ('MODULE_NAME','_parser'), + ('FULL_MODULE_STR','"breathe._parser"') + ], + py_limited_api=True + ) + ], include_package_data=True, entry_points={ "console_scripts": [ @@ -58,4 +136,5 @@ ], }, install_requires=requires, + cmdclass={'build_ext': CustomBuildExt} ) diff --git a/tests/test_renderer.py b/tests/test_renderer.py index a858c65d..8f159a18 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -1,14 +1,15 @@ import os import pytest +import sphinx.locale import sphinx.addnodes import sphinx.environment -from breathe.parser.compound import ( - compounddefTypeSub, - linkedTextTypeSub, - memberdefTypeSub, - paramTypeSub, - refTypeSub, +from breathe.parser import ( + Node_compounddefType, + Node_linkedTextType, + Node_memberdefType, + Node_paramType, + Node_refType, MixedContainer, ) from breathe.renderer.sphinxrenderer import SphinxRenderer @@ -74,34 +75,6 @@ def __init__(self, **kwargs): WrappedDoxygenNode.__init__(self, None, **kwargs) -class WrappedLinkedText(linkedTextTypeSub, WrappedDoxygenNode): - """A test wrapper of Doxygen linked text.""" - - def __init__(self, **kwargs): - WrappedDoxygenNode.__init__(self, linkedTextTypeSub, **kwargs) - - -class WrappedMemberDef(memberdefTypeSub, WrappedDoxygenNode): - """A test wrapper of Doxygen class/file/namespace member symbol such as a function declaration.""" - - def __init__(self, **kwargs): - WrappedDoxygenNode.__init__(self, memberdefTypeSub, **kwargs) - - -class WrappedParam(paramTypeSub, WrappedDoxygenNode): - """A test wrapper of Doxygen parameter.""" - - def __init__(self, **kwargs): - WrappedDoxygenNode.__init__(self, paramTypeSub, **kwargs) - - -class WrappedRef(refTypeSub, WrappedDoxygenNode): - """A test wrapper of Doxygen ref.""" - - def __init__(self, node_name, **kwargs): - WrappedDoxygenNode.__init__(self, refTypeSub, node_name, **kwargs) - - class WrappedCompoundDef(compounddefTypeSub, WrappedDoxygenNode): """A test wrapper of Doxygen compound definition.""" @@ -317,7 +290,7 @@ def render( def test_render_func(app): - member_def = WrappedMemberDef( + member_def = 
Node_memberdefType( kind="function", definition="void foo", type_="void", @@ -325,7 +298,7 @@ def test_render_func(app): argsstring="(int)", virt="non-virtual", param=[ - WrappedParam(type_=WrappedLinkedText(content_=[WrappedMixedContainer(value="int")])) + Node_paramType(type_=Node_linkedTextType([WrappedMixedContainer(value="int")])) ], ) signature = find_node(render(app, member_def), "desc_signature") @@ -348,7 +321,7 @@ def test_render_func(app): def test_render_typedef(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="typedef", definition="typedef int foo", type_="int", name="foo" ) signature = find_node(render(app, member_def), "desc_signature") @@ -356,7 +329,7 @@ def test_render_typedef(app): def test_render_c_typedef(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="typedef", definition="typedef unsigned int bar", type_="unsigned int", name="bar" ) signature = find_node(render(app, member_def, domain="c"), "desc_signature") @@ -364,7 +337,7 @@ def test_render_c_typedef(app): def test_render_c_function_typedef(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="typedef", definition="typedef void* (*voidFuncPtr)(float, int)", type_="void* (*", @@ -385,7 +358,7 @@ def test_render_c_function_typedef(app): def test_render_using_alias(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="typedef", definition="using foo = int", type_="int", name="foo" ) signature = find_node(render(app, member_def), "desc_signature") @@ -393,7 +366,7 @@ def test_render_using_alias(app): def test_render_const_func(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="function", definition="void f", type_="void", @@ -407,7 +380,7 @@ def test_render_const_func(app): def test_render_lvalue_func(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="function", definition="void f", type_="void", @@ -421,7 +394,7 @@ def test_render_lvalue_func(app): def test_render_rvalue_func(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="function", definition="void f", type_="void", @@ -435,7 +408,7 @@ def test_render_rvalue_func(app): def test_render_const_lvalue_func(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="function", definition="void f", type_="void", @@ -450,7 +423,7 @@ def test_render_const_lvalue_func(app): def test_render_const_rvalue_func(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="function", definition="void f", type_="void", @@ -465,7 +438,7 @@ def test_render_const_rvalue_func(app): def test_render_variable_initializer(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="variable", definition="const int EOF", type_="const int", @@ -477,20 +450,20 @@ def test_render_variable_initializer(app): def test_render_define_initializer(app): - member_def = WrappedMemberDef( + member_def = Node_memberdefType( kind="define", name="MAX_LENGTH", - initializer=WrappedLinkedText(content_=[WrappedMixedContainer(value="100")]), + initializer=Node_linkedTextType([WrappedMixedContainer(value="100")]), ) signature_w_initializer = find_node( render(app, member_def, show_define_initializer=True), "desc_signature" ) assert signature_w_initializer.astext() == "MAX_LENGTH 100" - member_def_no_show = WrappedMemberDef( + member_def_no_show = Node_memberdefType( kind="define", name="MAX_LENGTH_NO_INITIALIZER", - 
initializer=WrappedLinkedText(content_=[WrappedMixedContainer(value="100")]), + initializer=Node_linkedTextType([WrappedMixedContainer(value="100")]), ) signature_wo_initializer = find_node( @@ -501,7 +474,7 @@ def test_render_define_initializer(app): def test_render_define_no_initializer(app): sphinx.addnodes.setup(app) - member_def = WrappedMemberDef(kind="define", name="USE_MILK") + member_def = Node_memberdefType(kind="define", name="USE_MILK") signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "USE_MILK" @@ -510,13 +483,13 @@ def test_render_innergroup(app): refid = "group__innergroup" mock_compound_parser = MockCompoundParser( { - refid: WrappedCompoundDef( + refid: Node_compounddefType( kind="group", compoundname="InnerGroup", briefdescription="InnerGroup" ) } ) - ref = WrappedRef("InnerGroup", refid=refid) - compound_def = WrappedCompoundDef( + ref = Node_refType(["InnerGroup"], refid=refid) + compound_def = Node_compounddefType( kind="group", compoundname="OuterGroup", briefdescription="OuterGroup", innergroup=[ref] ) assert all( diff --git a/xml_parser_generator/CMakeLists.txt b/xml_parser_generator/CMakeLists.txt new file mode 100644 index 00000000..1cf7158a --- /dev/null +++ b/xml_parser_generator/CMakeLists.txt @@ -0,0 +1,38 @@ +# Note: CMake and this file are not neccessary to build and install Breathe. +# This exists to aid in development. + +cmake_minimum_required(VERSION 3.26) +project(doxyparse LANGUAGES C) + +find_package(Python3 REQUIRED COMPONENTS Interpreter Development.Module) +#find_package(Python3 REQUIRED COMPONENTS Interpreter Development.Module Development.SABIModule) +find_package(EXPAT) + +set(module_name parser) + +if (MSVC) + add_compile_options(/W4) +else() + add_compile_options(-Wall -Wextra -Werror=implicit-function-declaration) +endif() + +add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${module_name}.c ${CMAKE_CURRENT_SOURCE_DIR}/stubs_template.pyi + COMMAND Python3::Interpreter + ${CMAKE_CURRENT_SOURCE_DIR}/make_parser.py + ${CMAKE_CURRENT_SOURCE_DIR}/schema.json + ${CMAKE_CURRENT_SOURCE_DIR}/module_template.c + ${CMAKE_CURRENT_SOURCE_DIR}/stubs_template.pyi + ${CMAKE_CURRENT_BINARY_DIR}/${module_name}.c + ${CMAKE_CURRENT_BINARY_DIR}/${module_name}.pyi + DEPENDS + ${CMAKE_CURRENT_SOURCE_DIR}/make_parser.py + ${CMAKE_CURRENT_SOURCE_DIR}/schema.json + ${CMAKE_CURRENT_SOURCE_DIR}/module_template.c + ${CMAKE_CURRENT_SOURCE_DIR}/stubs_template.pyi + VERBATIM) + +Python3_add_library(${module_name} MODULE WITH_SOABI ${CMAKE_CURRENT_BINARY_DIR}/${module_name}.c) +#Python3_add_library(${module_name} MODULE USE_SABI 3.7 WITH_SOABI ${CMAKE_CURRENT_BINARY_DIR}/${module_name}.c) +target_link_libraries(${module_name} PRIVATE EXPAT::EXPAT) +target_compile_definitions(${module_name} PRIVATE MODULE_NAME=${module_name}) diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py new file mode 100644 index 00000000..b7670c5b --- /dev/null +++ b/xml_parser_generator/make_parser.py @@ -0,0 +1,644 @@ +from __future__ import annotations + +import sys +import json +import enum +import dataclasses +import functools +import keyword +import collections + +from typing import Any,Callable,cast,Literal,NamedTuple,NoReturn,TYPE_CHECKING,TypeVar + +import jinja2 +import perfect_hash + +if TYPE_CHECKING: + from collections.abc import Iterable,Sequence + +T = TypeVar('T') + + +# The minimum number of items a set should have before using a hash-based +# lookup. 
If fewer, the strings are compared one-by-one instead. +HASH_LOOKUP_THRESHOLD = 8 + +SPLIT_LINE_ITEM_THRESHOLD = 5 + +BUILTIN_ATTR_SCHEMA_TYPES = [('string','str'),('DoxBool','bool'),('integer','int'),('empty','None')] + + +def comma_join(items: Sequence[str],indent:int=4): + if len(items) < SPLIT_LINE_ITEM_THRESHOLD: + return ', '.join(items) + + return (',\n' + ' '*indent).join(items) + +class ContentType(enum.Enum): + bare = enum.auto() + tuple = enum.auto() + union = enum.auto() + +@dataclasses.dataclass(slots=True) +class TypeRef: + name: str + py_name: str + type: str|SchemaType + is_list: bool + min_items: Literal[0] | Literal[1] + + def py_type(self,as_param=False) -> str: + assert isinstance(self.type,SchemaType) + if self.is_list: + container = 'Iterable' if as_param else 'FrozenList' + return f'{container}[{self.type.py_name}]' + if self.min_items == 0: + return f'{self.type.py_name} | None' + return self.type.py_name + +@dataclasses.dataclass(slots=True) +class Attribute: + name: str + py_name: str + type: str|AttributeType + optional: bool + + def py_type(self,as_param=False) -> str: + assert isinstance(self.type,SchemaType) + if self.optional: + return f'{self.type.py_name} | None' + return self.type.py_name + +@dataclasses.dataclass(slots=True) +class SchemaType: + name: str + + def __str__(self): + return self.name + + def content_names(self) -> Iterable[str]: + return [] + + @property + def py_name(self) -> str: + raise NotImplementedError + +@dataclasses.dataclass(slots=True) +class AttributeType(SchemaType): + pass + +@dataclasses.dataclass(slots=True) +class BuiltinType(SchemaType): + py_name: str + +class SpType(BuiltinType): + __slots__ = () + +class BuiltinAttributeType(BuiltinType,AttributeType): + __slots__ = () + +class OtherAttrAction(enum.Enum): + ignore = enum.auto() + error = enum.auto() + +@dataclasses.dataclass(slots=True) +class ElementType(SchemaType): + bases: list[str|SchemaType] + attributes: dict[str,Attribute] + other_attr: OtherAttrAction + children: dict[str,TypeRef] + used_directly: bool + + def fields(self) -> Iterable[TypeRef|Attribute]: + yield from self.attributes.values() + yield from self.children.values() + + def all_fields(self) -> Iterable[TypeRef|Attribute]: + for b in self.bases: + if isinstance(b,ElementType): + yield from b.all_fields() + yield from self.fields() + + @property + def py_name(self) -> str: + return f'Node_{self.name}' + +@dataclasses.dataclass(slots=True) +class TagOnlyElement(ElementType): + pass + +@dataclasses.dataclass(slots=True) +class ListElement(ElementType): + min_items: int + content: dict[str,str|SchemaType] + content_type: ContentType + allow_text: bool + sp_tag: str|None = None + + def content_names(self) -> Iterable[str]: + for b in self.bases: + assert isinstance(b,SchemaType) + yield from b.content_names() + yield from self.content + + def all_content(self): + for b in self.bases: + if isinstance(b,ListElement): + yield from b.content.values() + yield from self.content.values() + + def py_item_type_union_size(self) -> int: + size = len(self.content) if self.content_type == ContentType.union else 0 + for b in self.bases: + if isinstance(b,ListElement): + size += b.py_item_type_union_size() + return size + + def py_union_ref(self) -> list[str]: + types = self.py_union_list() + if len(types) <= 1: return types + return ['ListItem_'+self.name] + + def py_union_list(self) -> list[str]: + by_type = collections.defaultdict(list) + for name,t in self.content.items(): + assert isinstance(t,SchemaType) + 
by_type[t.py_name].append(name) + types = ['TaggedValue[Literal[{}], {}]'.format( + comma_join(sorted(f"'{n}'" for n in names),26), + t) for t,names in by_type.items()] + str_included = False + for b in self.bases: + if isinstance(b,ListElement): + types.extend(b.py_union_ref()) + if b.allow_text: str_included = True + if self.allow_text and not str_included: + types.append('str') + return types + +@dataclasses.dataclass(slots=True) +class Schema: + roots: dict[str,str|SchemaType] + types: dict[str,SchemaType] + +class EnumEntry(NamedTuple): + xml: str + id: str + +@dataclasses.dataclass(slots=True) +class SchemaEnum(AttributeType): + children: list[EnumEntry] + hash: HashData | None = None + + def any_renamed(self) -> bool: + return any(c.xml != c.id for c in self.children) + + @property + def py_name(self) -> str: + return self.name + +@dataclasses.dataclass(slots=True) +class SchemaCharEnum(AttributeType): + values: str + + @property + def py_name(self) -> str: + return self.name + +def unknown_type_error(ref: str,context: str,is_element: bool) -> NoReturn: + thing = 'element' if is_element else 'attribute' + raise ValueError(f'{thing} "{context}" has undefined type "{ref}"') + +def check_type_ref(schema: Schema,ref: str,context: str,is_element: bool=True) -> SchemaType: + t = schema.types.get(ref) + if t is None: + unknown_type_error(ref,context,is_element) + return t + +def check_attr_type_ref(schema: Schema,ref: str,context: str) -> AttributeType: + r = check_type_ref(schema,ref,context,False) + if isinstance(r,AttributeType): + return r + + raise ValueError(f'attribute "{context}" has incompatible type "{ref}"') + +def check_py_name(name: str) -> None: + if (not name.isidentifier()) or keyword.iskeyword(name): + raise ValueError(f'"{name}" is not a valid Python identifier') + if name == '_children': + raise ValueError('the name "_children" is reserved by the parser generator') + +def resolve_refs(schema: Schema) -> tuple[list[str],list[str]]: + """Check that all referenced types exist and return the lists of all + element names and attribute names""" + + elements: set[str] = set() + attributes: set[str] = set() + + def check_element_type_defined(name: str,ref: str) -> SchemaType: + t = check_type_ref(schema,ref,name) + if isinstance(t,ElementType): + t.used_directly = True + return t + + for name,r in schema.roots.items(): + elements.add(name) + schema.roots[name] = check_element_type_defined(name,cast(str,r)) + + for typename,t in schema.types.items(): + if not t.name: t.name = typename + + if isinstance(t,ElementType): + # TODO: check for recursive bases + for i,b in enumerate(t.bases): + b_type = schema.types.get(cast(str,b)) + if b_type is None: + raise ValueError(f'type "{typename}" has undefined base "{b}"') + if not isinstance(b_type,ElementType): + raise ValueError(f'"{b}" cannot be used as a base') + if isinstance(b_type,ListElement): + if not isinstance(t,ListElement): + raise ValueError(f'non-list elements cannot use list elements as bases') + if b_type.content_type != t.content_type: + raise ValueError(f'list elements of one type cannot use list elements of another type as bases') + t.bases[i] = b_type + for name,child in t.children.items(): + child.name = name + if not child.py_name: child.py_name = name + check_py_name(child.py_name) + elements.add(name) + child.type = check_element_type_defined(f'{typename}.{name}',cast(str,child.type)) + for name,attr in t.attributes.items(): + attr.name = name + if not attr.py_name: attr.py_name = name + check_py_name(attr.py_name) 
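# Illustrative sketch, not from the original patch: for orientation, this is
# the shape of input accepted by check_schema()/check_type() near the end of
# this file, shown as the parsed-JSON Python literal. The type and element
# names ("Index", "Compound", "doxygenindex") are invented for illustration;
# "#string" is how built-in attribute types are registered by check_schema().
example_schema = {
    "roots": {"doxygenindex": "Index"},
    "types": {
        "Index": {
            "kind": "tag_only_element",
            "attributes": {"version": {"type": "#string"}},
            "children": {"compound": {"type": "Compound", "is_list": True}},
        },
        "Compound": {
            "kind": "tag_only_element",
            "attributes": {"refid": {"type": "#string"}, "kind": {"type": "#string"}},
        },
    },
}
# check_schema(example_schema) returns a Schema whose string references are
# then validated and resolved by resolve_refs(), as in the loop above.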
+ attributes.add(name) + t.attributes[name].type = check_attr_type_ref(schema,cast(str,attr.type),name) + if isinstance(t,ListElement): + for name,r in t.content.items(): + elements.add(name) + t.content[name] = check_element_type_defined(f'{typename}.{name}',cast(str,r)) + + elements.update(schema.roots); + + return sorted(elements),sorted(attributes) + +class HashData(NamedTuple): + salt1: str + salt2: str + g: list[int] + +def generate_hash(items: list[str]) -> HashData: + try: + f1,f2,g = perfect_hash.generate_hash(items) + return HashData(f1.salt,f2.salt,g) + except ValueError: + print(items,file=sys.stderr) + raise + +def collect_field_names(all_fields: set[str],cur_fields: set[str],refs: Iterable[Attribute|TypeRef],type_name: str) -> None: + for ref in refs: + all_fields.add(ref.py_name) + if ref.py_name in cur_fields: + raise ValueError(f'python name "{ref.py_name}" appears more than once in "{type_name}"') + cur_fields.add(ref.py_name) + +def make_env(schema: Schema) -> jinja2.Environment: + elements,attributes = resolve_refs(schema) + tag_names: set[str] = set(schema.roots) + py_field_name_set: set[str] = set() + char_enum_chars: set[str] = set() + list_element_field_counts: set[int] = set() + tagonly_and_tuple_field_counts: set[int] = set() + tuple_field_counts: set[int] = set() + + def field_count(t) -> int: + if not isinstance(t,ElementType): return 0 + return len(t.attributes) + len(t.children) + sum(cast(int,field_count(b)) for b in t.bases) + + for t in schema.types.values(): + if isinstance(t,SchemaEnum): + if len(t.children) >= HASH_LOOKUP_THRESHOLD: + t.hash = generate_hash([item.xml for item in t.children]) + elif isinstance(t,SchemaCharEnum): + char_enum_chars.update(t.values) + elif isinstance(t,ElementType): + fields: set[str] = set() + collect_field_names(py_field_name_set,fields,t.attributes.values(),t.name) + collect_field_names(py_field_name_set,fields,t.children.values(),t.name) + + if isinstance(t,TagOnlyElement): + if t.used_directly: + tagonly_and_tuple_field_counts.add(field_count(t)) + elif isinstance(t,ListElement): + if t.used_directly: + list_element_field_counts.add(field_count(t)) + if t.content_type == ContentType.union: + tag_names.update(t.content) + elif t.content_type == ContentType.tuple: + tuple_field_counts.add(len(t.content)) + tagonly_and_tuple_field_counts.add(len(t.content)) + + py_field_names = sorted(py_field_name_set) + + tmpl_env = jinja2.Environment( + block_start_string='{%', + block_end_string='%}', + variable_start_string='{$', + variable_end_string='$}', + comment_start_string='/*#', + comment_end_string='#*/', + line_statement_prefix='//%', + line_comment_prefix='//#', + autoescape=False) + + def has_attributes(t): + if not isinstance(t,ElementType): + return False + return t.attributes or any(has_attributes(b) for b in t.bases) + + def has_children(t): + if not isinstance(t,ElementType): + return False + return t.children or any(has_children(b) for b in t.bases) + + def has_children_or_content(t): + if not isinstance(t,ElementType): + return False + return t.children or (isinstance(t,ListElement) and t.content) or any(has_children_or_content(b) for b in t.bases) + + def has_children_or_tuple_content(t): + if not isinstance(t,ElementType): + return False + return (t.children + or (isinstance(t,ListElement) and t.content_type == ContentType.tuple and len(t.content) > 1) + or any(has_children_or_tuple_content(b) for b in t.bases)) + + def base_offsets(t): + if not isinstance(t,ElementType): + return tmpl_env.undefined() + total = 0 
+ for b in t.bases: + assert isinstance(b,SchemaType) + yield b,total + total += field_count(b) + yield None,total + + def list_type_or_base(t): + if not isinstance(t,ElementType): return False + return isinstance(t,ListElement) or any(list_type_or_base(b) for b in t.bases) + + def allow_text(t): + if not isinstance(t,ListElement): return False + return t.allow_text or any(allow_text(b) for b in t.bases) + + def content_type(ct): + def inner(t): + if not isinstance(t,ListElement): return False + return t.content_type == ct + return inner + + def children(t): + if not isinstance(t,ElementType): + return tmpl_env.undefined() + return t.children.values() + + def get_attributes(t): + if not isinstance(t,ElementType): + return tmpl_env.undefined() + return t.attributes.values() + + def content(t): + if not isinstance(t,ListElement): + return tmpl_env.undefined() + return t.content.items() + + def used_directly(t): + return isinstance(t,ElementType) and t.used_directly + + def optional(ref: TypeRef|Attribute) -> bool: + if isinstance(ref,TypeRef): + return ref.is_list or ref.min_items == 0 + return ref.optional + + def error(msg): + raise TypeError(msg) + + class Once: + def __init__(self,content): + self.content = content + self.used = False + + def __call__(self): + if self.used: return '' + self.used = True + return self.content + + for t in schema.types.values(): + if isinstance(t,ElementType) and any(field_count(cast(ElementType,b)) for b in t.bases): + raise ValueError( + 'elements having bases that have "attributes" or "children" are not currently supported') + + tmpl_env.tests.update({ + 'element': (lambda x: isinstance(x,ElementType)), + 'tagonly_e': (lambda x: isinstance(x,TagOnlyElement)), + 'list_e': list_type_or_base, + 'builtin_t': (lambda x: isinstance(x,BuiltinType)), + 'enumeration_t': (lambda x: isinstance(x,SchemaEnum)), + 'char_enum_t': (lambda x: isinstance(x,SchemaCharEnum)), + 'appends_str': (lambda x: isinstance(x,SpType)), + 'used_directly': used_directly, + 'allow_text': allow_text, + 'has_attributes': has_attributes, + 'has_children': has_children, + 'has_children_or_content': has_children_or_content, + 'has_fields': lambda x: field_count(x) > 0, + 'has_children_or_tuple_content': has_children_or_tuple_content, + 'content_bare': content_type(ContentType.bare), + 'content_tuple': content_type(ContentType.tuple), + 'content_union': content_type(ContentType.union), + 'optional': optional}) + tmpl_env.filters.update({ + 'field_count': field_count, + 'base_offsets': base_offsets, + 'children': children, + 'attributes': get_attributes, + 'content': content, + 'error': error, + 'Once': Once}) + tmpl_env.globals.update({ + 'types': list(schema.types.values()), + 'root_elements': list(schema.roots.items()), + 'element_names': elements, + 'attribute_names': attributes, + 'py_field_names': py_field_names, + 'e_hash': generate_hash(elements), + 'a_hash': generate_hash(attributes), + 'py_f_hash': generate_hash(py_field_names), + 'union_tag_names': sorted(tag_names), + 'char_enum_chars': {c:i for i,c in enumerate(sorted(char_enum_chars))}, + 'list_element_field_counts': list(list_element_field_counts), + 'tagonly_and_tuple_field_counts': list(tagonly_and_tuple_field_counts), + 'tuple_field_counts': list(tuple_field_counts), + 'OtherAttrAction': OtherAttrAction}) + + return tmpl_env + +class _NoDefault: pass +_NO_DEFAULT = _NoDefault() +def get_json_value(conv: Callable[[Any,str],T],context: str,d: dict[str,Any],key:str,default: T|_NoDefault=_NO_DEFAULT) -> T: + r = 
d.get(key,_NO_DEFAULT) + if r is _NO_DEFAULT: + if default is _NO_DEFAULT: + raise ValueError(f'missing value for "{context}.{key}"') + return cast(T,default) + return conv(r,context) + +def check_simple(t: type[T],name: str) -> Callable[[Any,str],T]: + def inner(x,context:str) -> T: + if isinstance(x,t): + return x + raise TypeError(f'value for "{context}" must be {name}') + return inner +get_json_bool = functools.partial(get_json_value,check_simple(bool,'a boolean')) +get_json_int = functools.partial(get_json_value,check_simple(int,'an integer')) + +check_string = check_simple(str,'a string') +get_json_str = functools.partial(get_json_value,check_string) + +check_obj = check_simple(cast(type[dict[str,Any]],dict),'an object') +get_json_obj = functools.partial(get_json_value,check_obj) + +check_list = check_simple(list,'an array') + +def get_json_mapping(item_conv: Callable[[Any,str],T],context: str,d: dict,key: str,default: dict[str,T]|_NoDefault=_NO_DEFAULT) -> dict[str,T]: + def check(x,context): + x = check_obj(x,context) + return {key:item_conv(value,f'{context}.{key}') for key,value in x.items()} + return get_json_value(check,context,d,key,default) + +def get_json_list(item_conv: Callable[[Any,str],T],context: str,d: dict,key: str,default: list[T]|_NoDefault=_NO_DEFAULT) -> list[T]: + def check(x,context) -> list[T]: + x = check_list(x,context) + return [item_conv(value,f'{context}[{i}]') for i,value in enumerate(x)] + return get_json_value(check,context,d,key,default) + +def check_zero_or_one(x,context:str) -> Literal[0] | Literal[1]: + if x == 0: return 0 + if x == 1: return 1 + raise TypeError(f'value for "{context}" must be 0 or 1') +get_json_zero_or_one = functools.partial(get_json_value,check_zero_or_one) + +def check_other_attr_action(x,context:str) -> OtherAttrAction: + if x == "ignore": return OtherAttrAction.ignore + if x == "error": return OtherAttrAction.error + raise TypeError(f'value for "{context}" must be "error" or "ignore"') +get_json_other_attr_action = functools.partial(get_json_value,check_other_attr_action) + +def check_typeref(x,context:str) -> TypeRef: + x = check_obj(x,context) + return TypeRef( + '', + get_json_str(context,x,'py_name',''), + get_json_str(context,x,'type'), + get_json_bool(context,x,'is_list',False), + get_json_zero_or_one(context,x,'min_items',1)) +get_json_typeref = functools.partial(get_json_value,check_typeref) + +def check_attribute(x,context:str) -> Attribute: + x = check_obj(x,context) + return Attribute( + '', + get_json_str(context,x,'py_name',''), + get_json_str(context,x,'type'), + get_json_bool(context,x,'optional',False)) +get_json_attribute = functools.partial(get_json_value,check_attribute) + +def check_enum_entry(x,context: str) -> EnumEntry: + if isinstance(x,str): + return EnumEntry(x,x) + if isinstance(x,dict): + xml = get_json_str(context,x,'xml') + id = get_json_str(context,x,'id',xml) + if not id.isidentifier(): + raise ValueError(f'value of "{context}" is not a valid Python identifier') + return EnumEntry(xml,id) + raise TypeError(f'"{context}" must be a string or object') + +def make_tag_only_element(x: dict[str,Any],context: str) -> TagOnlyElement: + return TagOnlyElement( + '', + get_json_list(check_string,context,x,'bases',[]), + get_json_mapping(check_attribute,context,x,'attributes',{}), + get_json_other_attr_action(context,x,'other_attr',OtherAttrAction.error), + get_json_mapping(check_typeref,context,x,'children',{}), + False) + +def make_list_element(x: dict[str,Any],context: str,content_t: ContentType) -> 
ListElement: + return ListElement( + '', + get_json_list(check_string,context,x,'bases',[]), + get_json_mapping(check_attribute,context,x,'attributes',{}), + get_json_other_attr_action(context,x,'other_attr',OtherAttrAction.error), + get_json_mapping(check_typeref,context,x,'children',{}), + False, + get_json_int(context,x,'min_items',0), + get_json_mapping(check_string,context,x,'content',{}), + content_t, + get_json_bool(context,x,'allow_text',False)) + +def make_enumeration(x: dict[str,Any],context: str) -> SchemaEnum: + return SchemaEnum('',get_json_list(check_enum_entry,context,x,'values')) + +def make_char_enumeration(x: dict[str,Any],context: str) -> SchemaCharEnum: + return SchemaCharEnum('',get_json_str(context,x,'values')) + +def check_type(x,context:str) -> SchemaType: + x = check_obj(x,context) + kind = get_json_str(context,x,'kind') + if kind == 'tag_only_element': + return make_tag_only_element(x,context) + if kind == 'list_element': + return make_list_element(x,context,ContentType.bare) + if kind == 'union_list_element': + return make_list_element(x,context,ContentType.union) + if kind == 'tuple_list_element': + return make_list_element(x,context,ContentType.tuple) + if kind == 'enumeration': + return make_enumeration(x,context) + if kind == 'char_enumeration': + return make_char_enumeration(x,context) + + raise ValueError(f'"{context}.kind" must be "tag_only_element", "list_element", "mixed_element" or "enumeration"') +get_json_type = functools.partial(get_json_value,check_type) + +def check_schema(x) -> Schema: + if not isinstance(x,dict): + raise TypeError('json value must be an object') + r = Schema( + get_json_mapping(check_string,'',x,'roots'), + get_json_mapping(check_type,'',x,'types',{})) + r.types['#spType'] = SpType('spType','str') + for t,py in BUILTIN_ATTR_SCHEMA_TYPES: + r.types['#'+t] = BuiltinAttributeType(t,py) + return r + +def generate_from_json(json_path,c_template_file,pyi_template_file,c_output_file,pyi_output_file) -> None: + with open(json_path,'rb') as ifile: + schema = check_schema(json.load(ifile)) + + env = make_env(schema) + + with open(c_template_file) as tfile: + template_str = tfile.read() + with open(c_output_file,'w') as ofile: + env.from_string(template_str).stream().dump(ofile) + + with open(pyi_template_file) as tfile: + template_str = tfile.read() + with open(pyi_output_file,'w') as ofile: + env.from_string(template_str).stream().dump(ofile) + + +if __name__ == '__main__': + generate_from_json(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],sys.argv[5]) diff --git a/xml_parser_generator/module_template.c b/xml_parser_generator/module_template.c new file mode 100644 index 00000000..83ed3b00 --- /dev/null +++ b/xml_parser_generator/module_template.c @@ -0,0 +1,1993 @@ +#define PY_SSIZE_T_CLEAN +#include + +/* Py_LIMITED_API isn't compatible with Py_TRACE_REFS */ +#if !defined(Py_TRACE_REFS) && defined(PARSER_PY_LIMITED_API) +#define Py_LIMITED_API PARSER_PY_LIMITED_API +#endif + +#include +#include +#include +#include + +#pragma GCC diagnostic ignored "-Wunused-parameter" + +#define STACK_BLOCK_SIZE 100 +#define NODE_LIST_INITIAL_CAPACITY 5 + +#ifndef MODULE_NAME +#define MODULE_NAME parser +#endif + +#ifndef FULL_MODULE_STR +#define FULL_MODULE_STR Py_STRINGIFY(MODULE_NAME) +#endif + +#define _MAKE_INIT_FUN_NAME(END) PyInit_##END +#define MAKE_INIT_FUN_NAME(END) _MAKE_INIT_FUN_NAME(END) + +#define EXPAT_BUFFER_SIZE 0x1000 + +/* According to the Expat documentation, the "len" argument passed to XML_Parse +must be "considerably less than 
the maximum value for an integer", so long input +is broken into chunks. */ +#define EXPAT_CHUNK_SIZE (1<<20) + +#ifndef Py_TPFLAGS_SEQUENCE +#define Py_TPFLAGS_SEQUENCE 0 +#endif + + +enum { + CLASS_FROZEN_LIST = 0, + CLASS_TAGGED_VALUE, +//% for type in types|select('used_directly') + CLASS__{$ type $}, +//% endfor +//% for type in types|select('content_tuple') + CLASS_ITEM__{$ type $}, +//% endfor + CLASS_COUNT +}; + +//% for type in types|select('enumeration_t') +/* it's ENUM_VALUE because it refers to a Python enum value */ +enum { +//% for value in type.children + ENUM_VALUE__{$ type $}__{$ value.id $}{$ ' = 0' if loop.first $}, +//% endfor + ENUM_VALUE_COUNT__{$ type $} +}; +//% endfor + +typedef enum { +//% for n in union_tag_names + TAGGED_UNION_NAME__{$ n $}{$ ' = 0' if loop.first $}, +//% endfor + TAGGED_UNION_NAME_COUNT +} tagged_union_name_t; + +//% if char_enum_chars +const char enum_chars[] = {{% for c in char_enum_chars %}'{$ c $}'{$ ',' if not loop.last $}{% endfor %}}; + +enum { +//% for c in char_enum_chars + ENUM_CHAR__{$ c $}{$ ' = 0' if loop.first $}, +//% endfor + ENUM_CHAR_COUNT +}; +//% endif + +static PyModuleDef module_def; + +typedef struct { + /* the type of the exception thrown for errors in the input */ + PyObject *parse_error_exc_type; + + /* the type of the warning category for ignorable problems in the input */ + PyObject *parse_warn_exc_type; + + PyObject *tag_names[TAGGED_UNION_NAME_COUNT]; + +//% if char_enum_chars + PyObject *char_objects[ENUM_CHAR_COUNT]; +//% endif + + /* an array of record-like classes */ + PyTypeObject *classes[CLASS_COUNT]; + +//% for type in types|select('enumeration_t') + PyObject *enum_values__{$ type $}[ENUM_VALUE_COUNT__{$ type $}]; +//% endfor +} module_state; + +struct _parse_state; +struct _parse_callbacks; + +typedef int (*child_start_callback)(struct _parse_state*,const XML_Char*,const XML_Char**); +typedef int (*finish_callback)(struct _parse_state*); +typedef int (*text_callback)(struct _parse_state*,const XML_Char*,int); + +typedef struct _parse_callbacks { + PyObject **value; + child_start_callback cs_call; + text_callback t_call; + finish_callback f_call; +} parse_callbacks; + +typedef struct _callback_stack_block { + struct _callback_stack_block *prev; + struct _callback_stack_block *next; + parse_callbacks stack[STACK_BLOCK_SIZE]; +} callback_stack_block; + +typedef struct _parse_state { + callback_stack_block *block; + unsigned int block_used; + XML_Parser parser; + int ignore_level; + module_state *py; +} parse_state; + +static parse_callbacks *push_callbacks(parse_state *state) { + if(state->block_used == STACK_BLOCK_SIZE) { + if(state->block->next == NULL) { + state->block->next = PyMem_Malloc(sizeof(callback_stack_block)); + if(state->block->next == NULL) return NULL; + state->block->next->next = NULL; + state->block->next->prev = state->block; + } + state->block = state->block->next; + state->block_used = 0; + } + ++state->block_used; + return &state->block->stack[state->block_used-1]; +} + +static parse_callbacks *top_callbacks(parse_state *state) { + assert(state->block_used); + return &state->block->stack[state->block_used-1]; +} + +static void pop_callbacks(parse_state *state) { + assert(state->block_used); + --state->block_used; + if(state->block_used == 0 && state->block->prev) { + state->block = state->block->prev; + state->block_used = STACK_BLOCK_SIZE; + } +} + +static void set_parse_error(parse_state *state,const char *msg) { + PyErr_Format( + state->py->parse_error_exc_type, + "Error on line %li: 
%s", + (long)XML_GetCurrentLineNumber(state->parser), + msg); +} +#define SET_PARSE_ERROR_FMT(state,msg,...) \ + PyErr_Format(\ + state->py->parse_error_exc_type,\ + "Error on line %li: " msg,\ + (long)XML_GetCurrentLineNumber(state->parser),\ + __VA_ARGS__) + +static int set_parse_warning(parse_state *state,const char *msg) { + return PyErr_WarnFormat( + state->py->parse_warn_exc_type, + 1, + "Warning on line %li: %s", + (long)XML_GetCurrentLineNumber(state->parser), + msg); +} +#define SET_PARSE_WARNING_FMT(state,msg,...) \ + PyErr_WarnFormat(\ + state->py->parse_warn_exc_type,\ + 1,\ + "Warning on line %li: " msg,\ + (long)XML_GetCurrentLineNumber(state->parser),\ + __VA_ARGS__) + +static void XMLCALL start_element(void *user,const XML_Char *name,const XML_Char **attrs) { + parse_state *state = (parse_state*)user; + parse_callbacks *pc = top_callbacks(state); + + if(state->ignore_level) { + ++state->ignore_level; + return; + } + + if(pc->cs_call != NULL) { + int r = pc->cs_call(state,name,attrs); + if(r < 0) { + XML_StopParser(state->parser,XML_FALSE); + return; + } else if(r) return; + } + + if(SET_PARSE_WARNING_FMT(state,"unexpected element \"%s\"",name)) { + XML_StopParser(state->parser,XML_FALSE); + } else { + state->ignore_level = 1; + } +} + +static void XMLCALL end_element(void *user,const XML_Char *Py_UNUSED(name)) { + parse_state *state = (parse_state*)user; + parse_callbacks *pc = top_callbacks(state); + + if(state->ignore_level) { + --state->ignore_level; + return; + } + + if(pc->f_call != NULL && pc->f_call(state)) XML_StopParser(state->parser,XML_FALSE); + pop_callbacks(state); +} + +int non_whitespace(const char *s,int len) { + int i; + for(i=0; i= 0; + default: + return 1; + } + } + return 0; +} + +static void XMLCALL character_data(void *user,const XML_Char *s,int len) { + parse_state *state = (parse_state*)user; + parse_callbacks *pc = top_callbacks(state); + + if(state->ignore_level) return; + + if(pc->t_call != NULL) { + if(pc->t_call(state,s,len)) XML_StopParser(state->parser,XML_FALSE); + } else if(non_whitespace(s,len)) { + if(set_parse_warning(state,"unexpected character data")) { + XML_StopParser(state->parser,XML_FALSE); + } + } +} + +static int find_str_in_array(const char *str,const char *array[],int a_size) { + for(int i=0; i 0) Py_VISIT(objs[size]); + return 0; +} + +/* Creating a type with PyStructSequence_NewType would have been preferable, but +there is no way to get the address of a value inside a native Python object +using the stable ABI. The parser stack requires the address of where the current +value being parsed, will be placed, so that extra code isn't needed for the end +of each child element to move the value to the correct spot. 
*/ +typedef struct { + PyObject_HEAD + PyObject *values[2]; +} tagged_value; + +static void init_tagged_value(tagged_value *tv) { + tv->values[0] = NULL; + tv->values[1] = NULL; +} +static tagged_value *create_tagged_value(module_state *state) { + tagged_value *r = PyObject_GC_New(tagged_value,state->classes[CLASS_TAGGED_VALUE]); + if(r != NULL) init_tagged_value(r); + return r; +} + +static void tagged_value_dealloc(tagged_value *tv) { + PyTypeObject *t = Py_TYPE(tv); + PyObject_GC_UnTrack(tv); + Py_XDECREF(tv->values[0]); + Py_XDECREF(tv->values[1]); + ((freefunc)PyType_GetSlot(t,Py_tp_free))(tv); + Py_DECREF(t); +} +static int tagged_value_traverse(tagged_value *tv,visitproc visit,void *arg) { + Py_VISIT(tv->values[0]); + Py_VISIT(tv->values[1]); + return 0; +} + +static Py_ssize_t tagged_value_size(PyObject *Py_UNUSED(obj)) { + return 2; +} + +static PyObject *tagged_value_item(tagged_value *tv,Py_ssize_t i) { + if(i < 0 || i > 1) { + PyErr_SetString(PyExc_IndexError,"index out of range"); + return NULL; + } + Py_INCREF(tv->values[i]); + return tv->values[i]; +} + +static PyMemberDef tagged_value_members[] = { + {"name",T_OBJECT_EX,offsetof(tagged_value,values),READONLY,NULL}, + {"value",T_OBJECT_EX,offsetof(tagged_value,values) + sizeof(PyObject*),READONLY,NULL}, + {NULL}}; + +PyObject *tagged_value_tp_new(PyTypeObject *subtype,PyObject *args,PyObject *kwds) { + tagged_value *r; + if(kwds != NULL && PyDict_Size(kwds)) { + PyErr_SetString(PyExc_TypeError,"TaggedValue.__new__ does not take any keyword arguments"); + return NULL; + } + if(PyTuple_Size(args) != 2) { + PyErr_SetString(PyExc_TypeError,"TaggedValue.__new__ takes exactly two arguments"); + return NULL; + } + + r = (tagged_value*)((allocfunc)PyType_GetSlot(subtype,Py_tp_alloc))(subtype,0); + if(r == NULL) return NULL; + + r->values[0] = PyTuple_GetItem(args,0); + Py_INCREF(r->values[0]); + r->values[1] = PyTuple_GetItem(args,1); + Py_INCREF(r->values[1]); + + return (PyObject*)r; +} + +static PyType_Slot tagged_value_slots[] = { + {Py_tp_new,tagged_value_tp_new}, + {Py_tp_members,tagged_value_members}, + {Py_tp_dealloc,tagged_value_dealloc}, + {Py_sq_length,tagged_value_size}, + {Py_sq_item,tagged_value_item}, + {Py_tp_traverse,tagged_value_traverse}, + {0,NULL}}; + + +typedef struct { + PyObject_HEAD + size_t size; + size_t capacity; + PyObject **content; +} frozen_list; + +static void init_frozen_list(frozen_list *fl) { + fl->size = 0; + fl->capacity = 0; + fl->content = NULL; +} + +static frozen_list *create_frozen_list(module_state *state) { + frozen_list *r = PyObject_GC_New(frozen_list,state->classes[CLASS_FROZEN_LIST]); + if(r) init_frozen_list(r); + return r; +} + +/* This steals a reference to 'o'. 'o' can also be NULL to reserve a spot. 
*/ +static int frozen_list_push_object(frozen_list *fl,PyObject *o,size_t initial_cap) { + assert(fl->size <= fl->capacity); + + if(fl->size == fl->capacity) { + PyObject **tmp; + size_t new_cap = fl->capacity * 2; + if(fl->capacity == 0) new_cap = initial_cap; + tmp = PyMem_Realloc(fl->content,new_cap * sizeof(PyObject*)); + if(tmp == NULL) { + PyErr_NoMemory(); + return -1; + } + fl->capacity = new_cap; + fl->content = tmp; + } + fl->content[fl->size++] = o; + return 0; +} + +static PyObject **frozen_list_push_tagged_value(module_state *state,tagged_union_name_t name,frozen_list *fl,size_t initial_cap) { + tagged_value *tv = create_tagged_value(state); + if(tv == NULL) return NULL; + if(frozen_list_push_object(fl,(PyObject*)tv,initial_cap)) { + Py_DECREF(tv); + return NULL; + } + tv->values[0] = state->tag_names[name]; + Py_INCREF(tv->values[0]); + return &tv->values[1]; +} + +static void frozen_list_dealloc(frozen_list *obj) { + size_t i; + PyTypeObject *t = Py_TYPE(obj); + PyObject_GC_UnTrack(obj); + for(i=0; isize; ++i) Py_XDECREF(obj->content[i]); + if(obj->content) PyMem_Free(obj->content); + ((freefunc)PyType_GetSlot(t,Py_tp_free))(obj); + Py_DECREF(t); +} +static int frozen_list_traverse(frozen_list *obj,visitproc visit,void *arg) { + return visit_array(obj->content,obj->size,visit,arg); +} + +static Py_ssize_t frozen_list_size(frozen_list *fl) { + return (Py_ssize_t)fl->size; +} + +static PyObject *frozen_list_item(frozen_list *fl,Py_ssize_t i) { + if(i < 0 || (size_t)i >= fl->size) { + PyErr_SetString(PyExc_IndexError,"index out of range"); + return NULL; + } + Py_INCREF(fl->content[i]); + return fl->content[i]; +} + +static int frozen_list_fill(frozen_list *fl,PyObject *iterable) { + PyObject *itr; + PyObject *tmp; + Py_ssize_t initial_size = PyObject_Size(iterable); + if(initial_size < 0) { + PyErr_Clear(); + initial_size = NODE_LIST_INITIAL_CAPACITY; + } + if(initial_size == 0) return 0; + + itr = PyObject_GetIter(iterable); + if(itr == NULL) return -1; + while((tmp = PyIter_Next(itr))) { + if(frozen_list_push_object(fl,tmp,(size_t)initial_size)) { + Py_DECREF(tmp); + Py_DECREF(itr); + return -1; + } + } + Py_DECREF(itr); + if(PyErr_Occurred()) return -1; + return 0; +} + +void raise_no_keyword_allowed(const char *func) { + PyErr_Format(PyExc_TypeError,"%s does not take any keyword arguments",func); +} + +PyObject *frozen_list_tp_new(PyTypeObject *subtype,PyObject *args,PyObject *kwds) { + frozen_list *r; + if(kwds != NULL && PyDict_Size(kwds)) { + raise_no_keyword_allowed("FrozenList.__new__"); + return NULL; + } + if(PyTuple_Size(args) != 1) { + PyErr_SetString(PyExc_TypeError,"FrozenList.__new__ takes exactly one argument"); + return NULL; + } + + r = (frozen_list*)((allocfunc)PyType_GetSlot(subtype,Py_tp_alloc))(subtype,0); + if(r == NULL) return NULL; + + init_frozen_list(r); + if(frozen_list_fill(r,PyTuple_GetItem(args,0))) { + Py_DECREF(r); + return NULL; + } + return (PyObject*)r; +} + +static PyType_Slot frozen_list_slots[] = { + {Py_tp_new,frozen_list_tp_new}, + {Py_tp_dealloc,frozen_list_dealloc}, + {Py_sq_length,frozen_list_size}, + {Py_sq_item,frozen_list_item}, + {Py_tp_traverse,frozen_list_traverse}, + {0,NULL} +}; + + +typedef enum { +//% for n in element_names + ELEMENT__{$ n $}{$ ' = 0' if loop.first $}, +//% endfor + ELEMENT_COUNT, + ELEMENT_OTHER +} element_type; + +static const char *element_names[] = { +//% for n in element_names + "{$ n $}", +//% endfor +}; + +typedef enum { +//% for n in attribute_names + ATTRIBUTE__{$ n $}{$ ' = 0' if loop.first $}, 
+//% endfor + ATTRIBUTE_COUNT, + ATTRIBUTE_OTHER +} attribute_type; + +typedef enum { +//% for n in py_field_names + PY_FIELD__{$ n $}{$ ' = 0' if loop.first $}, +//% endfor + PY_FIELD_COUNT, + PY_FIELD_OTHER +} py_field_type; + +static const char *attribute_names[] = { +//% for n in attribute_names + "{$ n $}"{$ ',' if not loop.last $} +//% endfor +}; + +static const char *tagged_union_names[] = { +//% for n in union_tag_names + "{$ n $}"{$ ',' if not loop.last $} +//% endfor +}; + +static const char *py_field_names[] = { +//% for n in py_field_names + "{$ n $}"{$ ',' if not loop.last $} +//% endfor +}; + +//% macro hash_lookup(f_name,hash_info,count,names,type,default) +static {$ type $} {$ f_name $}(const char *key) { +//% if hash_info + static const int salt_length = {$ hash_info.salt1|length $}; + static const int g_length = {$ hash_info.g|length $}; + static const int g[] = { +{$ hash_info.g|join(', ')|wordwrap $}}; + int f1 = 0, f2 = 0, i; + + for(i=0; key[i] && i salt_length) return PY_FIELD_OTHER; + +#ifdef NDEBUG + PyUnicode_AsUCS4(key,buffer,key_len,0); +#else + { + Py_UCS4 *r = PyUnicode_AsUCS4(key,buffer,key_len,0); + assert(r); + } +#endif + + for(i=0; i 127) return PY_FIELD_OTHER; + f1 += "{$ py_f_hash.salt1 $}"[i] * (int)buffer[i]; + f2 += "{$ py_f_hash.salt2 $}"[i] * (int)buffer[i]; + } + i = (g[f1 % g_length] + g[f2 % g_length]) % g_length; + + if(i < PY_FIELD_COUNT && PyUnicode_CompareWithASCIIString(key,py_field_names[i]) == 0) return (py_field_type)i; + + return PY_FIELD_OTHER; +} + +typedef struct { + PyObject_HEAD + PyObject *fields[1]; +} node_tagonly_common; + +static void init_node_tagonly_common(node_tagonly_common *n,size_t fields) { + size_t i; + for(i=0; ifields[i] = NULL; +} +static node_tagonly_common *create_node_tagonly_common(module_state *state,size_t class_index,size_t fields) { + node_tagonly_common *r = PyObject_GC_New(node_tagonly_common,state->classes[class_index]); + if(r) init_node_tagonly_common(r,fields); + return r; +} + +static void node_tagonly_common_dealloc(node_tagonly_common *obj,size_t field_count) { + size_t i; + PyTypeObject *t = Py_TYPE(obj); + PyObject_GC_UnTrack(obj); + for(i=0; ifields[i]); + ((freefunc)PyType_GetSlot(t,Py_tp_free))(obj); + Py_DECREF(t); +} +static int node_tagonly_common_traverse(node_tagonly_common *obj,visitproc visit,void *arg,size_t field_count) { + return visit_array(obj->fields,field_count,visit,arg); +} + +//% for count in tagonly_and_tuple_field_counts +static void node_tagonly_common_dealloc_{$ count $}(PyObject *obj) { + node_tagonly_common_dealloc((node_tagonly_common*)obj,{$ count $}); +} +static int node_tagonly_common_traverse_{$ count $}(PyObject *obj,visitproc visit,void *arg) { + return node_tagonly_common_traverse((node_tagonly_common*)obj,visit,arg,{$ count $}); +} +//% endfor + +typedef struct { + frozen_list base; + PyObject *fields[1]; +} node_list_common; + +static void init_node_list_common(node_list_common *n,size_t fields) { + size_t i; + init_frozen_list(&n->base); + for(i=0; ifields[i] = NULL; +} +static node_list_common *create_node_list_common(module_state *state,size_t class_index,size_t fields) { + node_list_common *r = PyObject_GC_New(node_list_common,state->classes[class_index]); + if(r) init_node_list_common(r,fields); + PyObject_GC_Track(r); + return r; +} + +static void node_list_common_dealloc(node_list_common *obj,size_t field_count) { + size_t i; + PyTypeObject *t = Py_TYPE(obj); + PyObject_GC_UnTrack(obj); + for(i=0; ifields[i]); + for(i=0; ibase.size; ++i) 
Py_XDECREF(obj->base.content[i]); + if(obj->base.content) PyMem_Free(obj->base.content); + ((freefunc)PyType_GetSlot(t,Py_tp_free))(obj); + Py_DECREF(t); +} +static int node_list_common_traverse(node_list_common *obj,visitproc visit,void *arg,size_t field_count) { + int r = visit_array(obj->fields,field_count,visit,arg); + if(r) return r; + return frozen_list_traverse(&obj->base,visit,arg); +} + +//% for count in list_element_field_counts +static void node_list_common_dealloc_{$ count $}(PyObject *obj) { + node_list_common_dealloc((node_list_common*)obj,{$ count $}); +} +static int node_list_common_traverse_{$ count $}(PyObject *obj,visitproc visit,void *arg) { + return node_list_common_traverse((node_list_common*)obj,visit,arg,{$ count $}); +} +//% endfor + +/* this steals a reference to "src" */ +static int append_str_obj(PyObject **dest,PyObject *src) { + assert(PyUnicode_CheckExact(*dest)); + + PyObject *tmp = PyUnicode_Concat(*dest,src); + if(tmp == NULL) return -1; + Py_DECREF(*dest); + *dest = tmp; + return 0; +} + +static int node_list_common_text(parse_state *state,const XML_Char *str,int size) { + int r; + frozen_list *n = &((node_list_common*)*top_callbacks(state)->value)->base; + PyObject *str_obj = PyUnicode_FromStringAndSize(str,size); + if(str_obj == NULL) return -1; + + if(n->size && PyUnicode_CheckExact(n->content[n->size-1])) { + r = append_str_obj(&n->content[n->size-1],str_obj); + Py_DECREF(str_obj); + return r; + } + + r = frozen_list_push_object(n,str_obj,NODE_LIST_INITIAL_CAPACITY); + if(r) Py_DECREF(str_obj); + return r; +} + +typedef node_tagonly_common tuple_item; +static tuple_item *create_tuple_item(module_state *state,size_t class_index,size_t fields) { + return create_node_tagonly_common(state,class_index,fields); +} + +static PyObject *tuple_item_item_common(PyObject *obj,Py_ssize_t i,Py_ssize_t size) { + if(i < 0 || i >= size) { + PyErr_SetString(PyExc_IndexError,"index out of range"); + return NULL; + } + PyObject *r = ((tuple_item*)obj)->fields[i]; + Py_INCREF(r); + return r; +} + +//% for count in tuple_field_counts +static Py_ssize_t tuple_item_size_{$ count $}(PyObject *Py_UNUSED(obj)) { + return {$ count $}; +} + +static PyObject *tuple_item_item_{$ count $}(PyObject *obj,Py_ssize_t i) { + return tuple_item_item_common(obj,i,{$ count $}); +} +//% endfor + +static PyObject **frozen_list_push_tuple_item(parse_state *state,Py_ssize_t tuple_i,Py_ssize_t tuple_size,const char **field_names,int class_index,frozen_list *fl,size_t initial_cap) { + assert(tuple_size > 0); + if(tuple_i == 0) { + tuple_item *new_tuple; + + if(fl->size && ((tuple_item*)fl->content[fl->size-1])->fields[tuple_size-1] == NULL) { + SET_PARSE_ERROR_FMT( + state, + "\"%s\" element can only come after \"%s\" element or be the first in its group", + field_names[0], + field_names[tuple_size-1]); + return NULL; + } + + new_tuple = create_tuple_item(state->py,class_index,(size_t)tuple_size); + if(frozen_list_push_object(fl,(PyObject*)new_tuple,initial_cap)) { + Py_DECREF(new_tuple); + return NULL; + } + return &new_tuple->fields[0]; + } + + if(!fl->size || ((tuple_item*)fl->content[fl->size-1])->fields[tuple_i-1] == NULL) { + SET_PARSE_ERROR_FMT( + state, + "\"%s\" element can only come after \"%s\" element", + field_names[tuple_i], + field_names[tuple_i-1]); + return NULL; + } + return &((tuple_item*)fl->content[fl->size-1])->fields[tuple_i]; +} + +static int frozen_list_check_complete_tuple(parse_state *state,Py_ssize_t tuple_size,const char **field_names,frozen_list *fl) { + if(fl->size) 
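+ /* if anything was parsed into this group, the last tuple item must be complete (no trailing fields left unset) */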
{ + tuple_item *last = (tuple_item*)fl->content[fl->size-1]; + assert(last->fields[0] != NULL); + Py_ssize_t i = tuple_size; + while(last->fields[i-1] == NULL) --i; + if(i != tuple_size) { + SET_PARSE_ERROR_FMT( + state, + "\"%s\" element must come after \"%s\" element", + field_names[i], + field_names[i-1]); + return -1; + } + } + return 0; +} + +{% macro common_affix(type) %}{$ 'list' if type is list_e else 'tagonly' $}{% endmacro %} + +//% for type in types +//% if type is used_directly + +enum { +//% for b,off in type|base_offsets + BASE_FIELD_OFFSET__{$ type $}{$ '__'~b if b $} = {$ off $}{$ ',' if not loop.last $} +//% endfor +}; + +enum { +//% if type is has_fields +//% for f in type.fields() + FIELD__{$ type $}__{$ f.py_name $}{$ ' = BASE_FIELD_OFFSET__'~type if loop.first $}, +//% endfor + FIELD_COUNT__{$ type $} +//% else + FIELD_COUNT__{$ type $} = 0 +//% endif +}; + +//% if type is has_fields +static PyMemberDef node_class_members__{$ type $}[] = { +//% for f in type.fields() + {"{$ f.py_name $}",T_OBJECT_EX,offsetof(node_{$ common_affix(type) $}_common,fields) + FIELD__{$ type $}__{$ f.py_name $} * sizeof(PyObject*),READONLY,NULL}, +//% endfor + {NULL} +}; +//% endif + +static PyObject *node_class_tp_new__{$ type $}(PyTypeObject *subtype,PyObject *args,PyObject *kwds); + +static PyType_Slot node_class_slots__{$ type $}[] = { + {Py_tp_new,node_class_tp_new__{$ type $}}, +//% if type is has_fields + {Py_tp_members,node_class_members__{$ type $}}, +//% endif + {Py_tp_dealloc,node_{$ common_affix(type) $}_common_dealloc_{$ type|field_count $}}, + {Py_tp_traverse,node_{$ common_affix(type) $}_common_traverse_{$ type|field_count $}}, + {0,NULL} +}; + + +//% if type is has_children_or_content +static int node_class_child_start__{$ type $}(parse_state*,const XML_Char*,const XML_Char**); +//% endif +//% if type is has_children_or_tuple_content +static int node_class_finish__{$ type $}(parse_state*); +//% endif +static int node_class_start__{$ type $}(parse_state*,PyObject**,const XML_Char**); +//% endif +//% if type is element +//% if type|field_count +static void node_class_new_set_fields__{$ type $}(PyObject **fields,PyObject *args,Py_ssize_t start_i); +static int node_class_new_set_kw_field__{$ type $}(module_state *state,PyObject **fields,py_field_type field,PyObject *value); +static int node_class_new_fields_end__{$ type $}(module_state *state,PyObject **fields); +//% endif +//% if type is has_attributes +static int node_class_attr__{$ type $}(parse_state*,{$ 'PyObject**,' if type is has_fields $}attribute_type,const XML_Char**); +//% endif +//% if type is has_children_or_content +static int node_class_child__{$ type $}(parse_state*,{$ 'PyObject**,' if type is has_fields $}element_type,const XML_Char**); +//% endif +//% if type is has_children +static int node_class_finish_fields__{$ type $}(parse_state *state,PyObject **fields); +//% endif +//% elif type is enumeration_t or type is char_enum_t +static PyObject *parse__{$ type $}(parse_state*,const char*); +//% endif +//% if type is content_tuple + +enum { +//% for field,ftype in type|content + TUPLE_ITEM_FIELD__{$ type $}__{$ field $}{$ ' = 0' if loop.first $}, +//% endfor + TUPLE_ITEM_FIELD_COUNT__{$ type $} +}; + +const char *tuple_item_field_names__{$ type $}[] = { +//% for field,ftype in type|content + "{$ field $}", +//% endfor + NULL /* needed by tp_new method */ +}; + +static PyMemberDef tuple_item_members__{$ type $}[] = { +//% for field,ftype in type|content + {"{$ field $}",T_OBJECT_EX,offsetof(tuple_item,fields) + 
TUPLE_ITEM_FIELD__{$ type $}__{$ field $} * sizeof(PyObject*),READONLY,NULL}, +//% endfor + {NULL} +}; + +PyObject *tuple_item_tp_new__{$ type $}(PyTypeObject *subtype,PyObject *args,PyObject *kwds) { + tuple_item *r = (tuple_item*)((allocfunc)PyType_GetSlot(subtype,Py_tp_alloc))(subtype,0); + if(r == NULL) return NULL; + + if(!PyArg_ParseTupleAndKeywords( + args, + kwds, + "{$ 'O'*(type|content|length) $}:{$ type $}.__new__", + (char**)tuple_item_field_names__{$ type $}, +//% for _ in type|content + &r->fields[{$ loop.index0 $}]{$ ',' if not loop.last $} +//% endfor + )) { + Py_DECREF(r); + return NULL; + } + + return (PyObject*)r; +} + +static PyType_Slot tuple_item_slots__{$ type $}[] = { + {Py_tp_members,tuple_item_members__{$ type $}}, + {Py_tp_dealloc,node_tagonly_common_dealloc_{$ type.content|length $}}, + {Py_tp_traverse,node_tagonly_common_traverse_{$ type.content|length $}}, + {Py_sq_length,tuple_item_size_{$ type.content|length $}}, + {Py_sq_item,tuple_item_item_{$ type.content|length $}}, + {0,NULL} +}; + +//% endif +//% endfor + +static int warn_unexpected_attribute(parse_state *state,const char *name) { + return SET_PARSE_WARNING_FMT(state,"unexpected attribute \"%s\"",name); +} + +static int warn_duplicate_attribute(parse_state *state,const char *name) { + return SET_PARSE_WARNING_FMT(state,"duplicate attribute \"%s\"",name); +} + +static void raise_duplicate_element_error(parse_state *state,const char *name) { + SET_PARSE_ERROR_FMT(state,"\"%s\" cannot appear more than once in this context",name); +} + +static void raise_missing_element_error(parse_state *state,const char *name) { + SET_PARSE_ERROR_FMT(state,"missing \"%s\" child",name); +} +static void raise_empty_list_element_error(parse_state *state,const char *name) { + SET_PARSE_ERROR_FMT(state,"at least one \"%s\" child is required",name); +} + +static void raise_invalid_enum_error(parse_state *state,const char *value) { + SET_PARSE_ERROR_FMT(state,"\"%s\" is not one of the allowed enumeration values",value); +} + +static void raise_invalid_char_enum_error(parse_state *state,char c,const char *allowed) { + SET_PARSE_ERROR_FMT(state,"\"%c\" is not one of the allowed character values; must be one of \"%s\"",c,allowed); +} + +int parse_integer(parse_state *state,const char *str,long *value) { + char *end; + errno = 0; + *value = strtol(str,&end,10); + if(errno != 0 || non_whitespace(end,-1)) { + errno = 0; + set_parse_error(state,"cannot parse integer"); + return -1; + } + return 0; +} + +static int set_string_attribute(parse_state *state,PyObject **field,const XML_Char **attr) { + PyObject *tmp; + if(*field != NULL) return warn_duplicate_attribute(state,attr[0]); + tmp = PyUnicode_FromString(attr[1]); + if(tmp == NULL) return -1; + *field = tmp; + return 0; +} +static int set_integer_attribute(parse_state *state,PyObject **field,const XML_Char **attr) { + long value; + PyObject *tmp; + if(*field != NULL) return warn_duplicate_attribute(state,attr[0]); + + if(parse_integer(state,attr[1],&value)) return -1; + tmp = PyLong_FromLong(value); + if(tmp == NULL) return -1; + *field = tmp; + return 0; +} +static int set_DoxBool_attribute(parse_state *state,PyObject **field,const XML_Char **attr) { + if(*field != NULL) return warn_duplicate_attribute(state,attr[0]); + if(strcmp(attr[1],"yes") == 0) *field = Py_True; + else if(strcmp(attr[1],"no") == 0) *field = Py_False; + else { + SET_PARSE_ERROR_FMT(state,"\"%s\" must be \"yes\" or \"no\"",attr[0]); + return -1; + } + Py_INCREF(*field); + return 0; +} + +static int 
node_string_text(parse_state *state,const XML_Char *str,int size) { + PyObject **dest = top_callbacks(state)->value; + PyObject *tmp = PyUnicode_FromStringAndSize(str,size); + if(tmp == NULL) return -1; + if(*dest == NULL) *dest = tmp; + else { + PyObject *joined = PyUnicode_Concat(*dest,tmp); + if(joined == NULL) { + Py_DECREF(tmp); + return -1; + } + Py_DECREF(*dest); + *dest = joined; + Py_DECREF(tmp); + } + return 0; +} +static int node_string_end(parse_state *state) { + PyObject **dest = top_callbacks(state)->value; + if(*dest == NULL) { + *dest = PyUnicode_FromStringAndSize(NULL,0); + if(*dest == NULL) return -1; + } + return 0; +} +static int node_start_string(parse_state *state,PyObject **dest,const XML_Char **attr) { + parse_callbacks *cb; + + for(; *attr != NULL; attr += 2) { + if(warn_unexpected_attribute(state,attr[0])) return -1; + } + + *dest = NULL; + + cb = push_callbacks(state); + if(cb == NULL) return -1; + + cb->value = dest; + cb->cs_call = NULL; + cb->f_call = node_string_end; + cb->t_call = node_string_text; + + return 1; +} + +static int node_start_empty(parse_state *state,PyObject **dest,const XML_Char **attr) { + parse_callbacks *cb; + + for(; *attr != NULL; attr += 2) { + if(warn_unexpected_attribute(state,attr[0])) return -1; + } + + cb = push_callbacks(state); + if(cb == NULL) return -1; + + *dest = Py_None; + Py_INCREF(Py_None); + + cb->value = NULL; + cb->cs_call = NULL; + cb->f_call = NULL; + cb->t_call = NULL; + + return 1; +} + +static int node_start_spType(parse_state *state,PyObject **dest,const XML_Char **attr) { + parse_callbacks *cb; + PyObject *c_obj; + char c = ' '; + + for(; *attr != NULL; attr += 2) { + long value; + + if(strcmp(attr[0],"value") != 0) { + if(warn_unexpected_attribute(state,attr[0])) return -1; + } + + if(parse_integer(state,attr[1],&value)) return -1; + if(value < 0 || value > 127) { + set_parse_error(state,"\"value\" must be between 0 and 127"); + return -1; + } + c = (char)value; + } + + c_obj = PyUnicode_FromStringAndSize(&c,1); + if(c_obj == NULL) return -1; + if(*dest == NULL) *dest = c_obj; + else { + int r = append_str_obj(dest,c_obj); + Py_DECREF(c_obj); + if(r) return -1; + } + + cb = push_callbacks(state); + if(cb == NULL) return -1; + + cb->value = NULL; + cb->cs_call = NULL; + cb->f_call = NULL; + cb->t_call = NULL; + + return 1; +} + +static void raise_dup_field_error(const char *name) { + PyErr_Format(PyExc_TypeError,"received more than one value for \"%s\"",name); +} + +static void raise_too_many_args_count(const char *func,Py_ssize_t given,Py_ssize_t maximum) { + PyErr_Format(PyExc_TypeError,"%s takes at most %zi arguments, %zi were given",func,maximum,given); +} +static void raise_invalid_keyword_arg(const char *func,PyObject *key) { + PyErr_Format(PyExc_TypeError,"%s does not take the keyword argument \"%U\"",func,key); +} +static void raise_needs_value_arg(const char *func,const char *key) { + PyErr_Format(PyExc_TypeError,"%s called with missing argument: \"%s\"",func,key); +} +static void raise_needs_pos_arg(const char *func,Py_ssize_t i) { + PyErr_Format(PyExc_TypeError,"%s called with missing positional argument #%zi",func,i+1); +} + +static int node_set_py_field(module_state *state,PyObject **field,PyObject *value,const char *name) { + if(*field != NULL) { + raise_dup_field_error(name); + return -1; + } + + *field = value; + return 1; +} + +static int node_set_py_field_frozen_list(module_state *state,PyObject **field,PyObject *value,const char *name) { + if(*field != NULL) { + raise_dup_field_error(name); + 
return -1;
+ }
+
+ if(PyObject_TypeCheck(value,state->classes[CLASS_FROZEN_LIST])) {
+ *field = value;
+ return 1;
+ }
+
+ *field = (PyObject*)create_frozen_list(state);
+ if(*field == NULL) return -1;
+ if(frozen_list_fill((frozen_list*)*field,value)) return -1;
+ return 1;
+}
+
+//% macro handle_attr(name,field,type)
+//% if type is builtin_t
+ if(set_{$ type $}_attribute(state,&fields[FIELD__{$ name $}__{$ field $}],attr)) return -1;
+//% else
+ if(fields[FIELD__{$ name $}__{$ field $}] != NULL) {
+ if(warn_duplicate_attribute(state,"{$ field $}")) return -1;
+ } else {
+ fields[FIELD__{$ name $}__{$ field $}] = parse__{$ type $}(state,attr[1]);
+ if(fields[FIELD__{$ name $}__{$ field $}] == NULL) return -1;
+ }
+//% endif
+//% endmacro
+
+//% for type in types
+//% if type is element
+//% if type|field_count
+static void node_class_new_set_fields__{$ type $}(PyObject **fields,PyObject *args,Py_ssize_t start_i) {
+//% for b in type.bases if b|field_count
+ node_class_new_set_fields__{$ b $}(fields + BASE_FIELD_OFFSET__{$ type $}__{$ b $},args,start_i + BASE_FIELD_OFFSET__{$ type $}__{$ b $});
+//% endfor
+ if(PyTuple_Size(args) - start_i > 0) {
+ switch(PyTuple_Size(args) - start_i) {
+//% for f in type.fields()|reverse
+ {$ 'default' if loop.first else 'case '~loop.revindex $}:
+ fields[FIELD__{$ type $}__{$ f.py_name $}] = PyTuple_GetItem(args,FIELD__{$ type $}__{$ f.py_name $} + start_i);
+ assert(fields[FIELD__{$ type $}__{$ f.py_name $}]);
+ Py_INCREF(fields[FIELD__{$ type $}__{$ f.py_name $}]);
+//% endfor
+ }
+ }
+}
+static int node_class_new_set_kw_field__{$ type $}(module_state *state,PyObject **fields,py_field_type field,PyObject *value) {
+//% for b in type.bases if b|field_count
+//% if loop.first
+ int r;
+//% endif
+ r = node_class_new_set_kw_field__{$ b $}(state,fields + BASE_FIELD_OFFSET__{$ type $}__{$ b $},field,value);
+ if(r) return r;
+//% endfor
+ switch(field) {
+//% for ref in type|attributes
+ case PY_FIELD__{$ ref.py_name $}:
+ return node_set_py_field(state,&fields[FIELD__{$ type $}__{$ ref.py_name $}],value,"{$ ref.py_name $}");
+//% endfor
+//% for ref in type|children
+ case PY_FIELD__{$ ref.py_name $}:
+ return node_set_py_field{$ '_frozen_list' if ref.is_list $}(state,&fields[FIELD__{$ type $}__{$ ref.py_name $}],value,"{$ ref.py_name $}");
+//% endfor
+ default:
+ return 0;
+ }
+}
+static int node_class_new_fields_end__{$ type $}(module_state *state,PyObject **fields) {
+//% for b in type.bases if b|field_count
+ if(node_class_new_fields_end__{$ b $}(state,fields + BASE_FIELD_OFFSET__{$ type $}__{$ b $})) return -1;
+//% endfor
+//% for ref in type|attributes
+ if(fields[FIELD__{$ type $}__{$ ref.py_name $}] == NULL) {
+//% if ref.optional
+ fields[FIELD__{$ type $}__{$ ref.py_name $}] = Py_None;
+ Py_INCREF(Py_None);
+//% else
+ raise_needs_value_arg("Node_{$ type $}.__new__","{$ ref.py_name $}");
+ return -1;
+//% endif
+ }
+//% endfor
+//% for ref in type|children
+ if(fields[FIELD__{$ type $}__{$ ref.py_name $}] == NULL) {
+//% if ref.is_list
+ fields[FIELD__{$ type $}__{$ ref.py_name $}] = (PyObject*)create_frozen_list(state);
+//% elif ref.min_items == 0
+ fields[FIELD__{$ type $}__{$ ref.py_name $}] = Py_None;
+ Py_INCREF(Py_None);
+//% else
+ raise_needs_value_arg("Node_{$ type $}.__new__","{$ ref.py_name $}");
+ return -1;
+//% endif
+ }
+//% endfor
+ return 0;
+}
+
+//% endif
+//% if type is has_attributes
+static int node_class_attr__{$ type $}(parse_state *state,{$ 'PyObject **fields,' if type is has_fields $}attribute_type attr_index,const XML_Char **attr) {
+//% for b in type.bases|select('has_attributes')
+//% if loop.first
+ int r;
+//% endif
+ r = node_class_attr__{$ b $}(state,{$ 'fields+BASE_FIELD_OFFSET__'~type~'__'~b~',' if type is has_fields and b is has_fields $}attr_index,attr);
+ if(r != 0) return r;
+//% endfor
+ switch(attr_index) {
+//% for attr in type|attributes
+ case ATTRIBUTE__{$ attr.name $}:
+{$ handle_attr(type,attr.py_name,attr.type) $} return 1;
+//% endfor
+ default:
+ return 0;
+ }
+}
+//% endif
+//% if type is has_children_or_content
+static int node_class_child__{$ type $}(parse_state *state,{$ 'PyObject **fields,' if type is has_fields $}element_type e_index,const XML_Char **attr) {
+//% if type is list_e and type.content
+ node_list_common *n;
+//% endif
+//% if type.bases
+ int r;
+//% for b in type.bases|select('has_children_or_content')
+ r = node_class_child__{$ b $}(state,{$ 'fields+BASE_FIELD_OFFSET__'~type~'__'~b~',' if type is has_fields and b is has_fields $}e_index,attr);
+ if(r) return r;
+//% endfor
+//% endif
+//% if type is list_e and type.content
+ n = (node_list_common*)*top_callbacks(state)->value;
+//% endif
+ switch(e_index) {
+//% for cref in type|children
+ case ELEMENT__{$ cref.name $}:
+//% if cref.is_list
+ assert(fields[FIELD__{$ type $}__{$ cref.py_name $}] != NULL && Py_TYPE(fields[FIELD__{$ type $}__{$ cref.py_name $}]) == state->py->classes[CLASS_FROZEN_LIST]);
+ {
+ frozen_list *fl = (frozen_list*)fields[FIELD__{$ type $}__{$ cref.py_name $}];
+ if(frozen_list_push_object(fl,NULL,NODE_LIST_INITIAL_CAPACITY)) return -1;
+ return node_{$ 'start_' if cref.type is builtin_t else 'class_start__' $}{$ cref.type $}(state,fl->content + (fl->size-1),attr);
+ }
+//% else
+ if(fields[FIELD__{$ type $}__{$ cref.py_name $}] != NULL) {
+ raise_duplicate_element_error(state,"{$ cref.name $}");
+ return -1;
+ }
+ return node_{$ 'start_' if cref.type is builtin_t else 'class_start__' $}{$ cref.type $}(state,&fields[FIELD__{$ type $}__{$ cref.py_name $}],attr);
+//% endif
+//% endfor
+//% for cname,ctype in type|content
+ case ELEMENT__{$ cname $}:
+//% if type is content_tuple
+ {
+ PyObject **dest = frozen_list_push_tuple_item(
+ state,
+ TUPLE_ITEM_FIELD__{$ type $}__{$ cname $},
+ TUPLE_ITEM_FIELD_COUNT__{$ type $},
+ tuple_item_field_names__{$ type $},
+ CLASS_ITEM__{$ type $},
+ &n->base,
+ NODE_LIST_INITIAL_CAPACITY);
+ if(dest == NULL) return -1;
+ return node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}(state,dest,attr);
+ }
+//% elif type is content_union
+ {
+//% if ctype is appends_str
+ PyObject **dest;
+ if(n->base.size && PyUnicode_CheckExact(n->base.content[n->base.size-1])) {
+ dest = &n->base.content[n->base.size-1];
+ } else {
+ dest = frozen_list_push_tagged_value(state->py,TAGGED_UNION_NAME__{$ cname $},&n->base,NODE_LIST_INITIAL_CAPACITY);
+ if(dest == NULL) return -1;
+ }
+//% else
+ PyObject **dest = frozen_list_push_tagged_value(state->py,TAGGED_UNION_NAME__{$ cname $},&n->base,NODE_LIST_INITIAL_CAPACITY);
+ if(dest == NULL) return -1;
+//% endif
+ return node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}(state,dest,attr);
+ }
+//% else
+ if(frozen_list_push_object(&n->base,NULL,NODE_LIST_INITIAL_CAPACITY)) return -1;
+ return node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}(state,n->base.content + (n->base.size-1),attr);
+//% endif
+//% endfor
+ default:
+ return 0;
+ }
+}
+//% endif
+//% if type is used_directly
+static PyObject *node_class_tp_new__{$
type $}(PyTypeObject *subtype,PyObject *args,PyObject *kwds) { + static const char *func_name = "Node_{$ type $}.__new__"; + PyObject *module; + module_state *state; + node_{$ common_affix(type) $}_common *n; +//% if type|field_count + PyObject *key, *value; + Py_ssize_t pos = 0; +//% endif + Py_ssize_t p_arg_count = PyTuple_Size(args); + Py_ssize_t kw_arg_count = kwds == NULL ? 0 : PyDict_Size(kwds); + + if(p_arg_count+kw_arg_count > FIELD_COUNT__{$ type $}{$ ' + 1' if type is list_e $}) { + raise_too_many_args_count(func_name,p_arg_count+kw_arg_count,FIELD_COUNT__{$ type $}{$ ' + 1' if type is list_e $}); + return NULL; + } + +//% if type is list_e + if(p_arg_count < 1) { + raise_needs_pos_arg(func_name,0); + return NULL; + } +//% endif + + module = PyState_FindModule(&module_def); + assert(module); + state = PyModule_GetState(module); + + n = create_node_{$ common_affix(type) $}_common(state,CLASS__{$ type $},FIELD_COUNT__{$ type $}); + if(n == NULL) return NULL; + +//% if type is list_e + if(frozen_list_fill(&n->base,PyTuple_GetItem(args,0))) { + Py_DECREF(n); + return NULL; + } +//% endif + +//% if type|field_count + node_class_new_set_fields__{$ type $}(n->fields,args,{$ '1' if type is list_e else '0' $}); + + if(kw_arg_count) { + while(PyDict_Next(kwds,&pos,&key,&value)) { + int r = node_class_new_set_kw_field__{$ type $}(state,n->fields,py_field_lookup(key),value); + if(r < 0) { + Py_DECREF(n); + return NULL; + } + if(!r) { + raise_invalid_keyword_arg(func_name,key); + Py_DECREF(n); + return NULL; + } + } + } + + if(node_class_new_fields_end__{$ type $}(state,n->fields)) { + Py_DECREF(n); + return NULL; + } +//% endif + + return (PyObject*)n; +} + +static int node_class_start__{$ type $}(parse_state *state,PyObject **dest,const XML_Char **attr) { + parse_callbacks *cb; + + node_{$ common_affix(type) $}_common *n = create_node_{$ common_affix(type) $}_common(state->py,CLASS__{$ type $},FIELD_COUNT__{$ type $}); + if(n == NULL) return -1; + *dest = (PyObject*)n; + +//% for ref in type|children +//% if ref.is_list + n->fields[FIELD__{$ type $}__{$ ref.py_name $}] = (PyObject*)create_frozen_list(state->py); + if(n->fields[FIELD__{$ type $}__{$ ref.py_name $}] == NULL) return -1; +//% endif +//% endfor + +//% if type|attributes|length or type.other_attr == OtherAttrAction.error + for(; *attr != NULL; attr += 2) { +//% if type|attributes|length + int r; + attribute_type attr_index = attribute_lookup(attr[0]); + r = node_class_attr__{$ type $}(state,n->fields,attr_index,attr); +//% if type.other_attr == OtherAttrAction.error + if(r < 0 || (r == 0 && warn_unexpected_attribute(state,attr[0]))) return -1; +//% else + if(r < 0) return -1; +//% endif +//% else + if(warn_unexpected_attribute(state,attr[0])) return -1; +//% endif + } +//% endif + + cb = push_callbacks(state); + if(cb == NULL) return -1; + + cb->value = dest; + +//% if type is has_children_or_content + cb->cs_call = node_class_child_start__{$ type $}; +//% else + cb->cs_call = NULL; +//% endif +//% if type is has_children_or_tuple_content + cb->f_call = node_class_finish__{$ type $}; +//% else + cb->f_call = NULL; +//% endif +//% if type is allow_text + cb->t_call = node_list_common_text; +//% else + cb->t_call = NULL; +//% endif + + return 1; +} +//% if type is has_children_or_content +static int node_class_child_start__{$ type $}(parse_state *state,const XML_Char *child_name,const XML_Char **attr) { + assert(Py_TYPE(*top_callbacks(state)->value) == state->py->classes[CLASS__{$ type $}]); +//% if type is has_fields + node_{$ 
common_affix(type) $}_common *n = (node_{$ common_affix(type) $}_common*)*top_callbacks(state)->value; +//% endif + return node_class_child__{$ type $}(state,{$ 'n->fields,' if type is has_fields $}element_lookup(child_name),attr); +} +//% endif +//% if type is has_children +static int node_class_finish_fields__{$ type $}(parse_state *state,PyObject **fields) { +//% for b in type.bases|select('has_children') + if(node_class_finish_fields__{$ b $}(state,fields+BASE_FIELD_OFFSET__{$ type $}__{$ b $})) return -1; +//% endfor +//% for ref in type|children +//% if ref.min_items +//% if ref.is_list + assert(fields[FIELD__{$ type $}__{$ ref.py_name $}] != NULL && Py_TYPE(fields[FIELD__{$ type $}__{$ ref.py_name $}]) == state->py->classes[CLASS_FROZEN_LIST]); + if(((frozen_list*)fields[FIELD__{$ type $}__{$ ref.py_name $}])->size < 1) { + raise_empty_list_element_error(state,"{$ ref.name $}"); + return -1; + } +//% else + if(fields[FIELD__{$ type $}__{$ ref.py_name $}] == NULL) { + raise_missing_element_error(state,"{$ ref.name $}"); + return -1; + } +//% endif +//% elif not ref.is_list + if(fields[FIELD__{$ type $}__{$ ref.py_name $}] == NULL) { + fields[FIELD__{$ type $}__{$ ref.py_name $}] = Py_None; + Py_INCREF(Py_None); + } +//% endif +//% endfor + return 0; +} +//% endif +//% if type is has_children_or_tuple_content +static int node_class_finish__{$ type $}(parse_state *state) { + assert(Py_TYPE(*top_callbacks(state)->value) == state->py->classes[CLASS__{$ type $}]); + node_{$ common_affix(type) $}_common *n = (node_{$ common_affix(type) $}_common*)*top_callbacks(state)->value; +//% if type is has_children + if(node_class_finish_fields__{$ type $}(state,n->fields)) return -1; +//% endif +//% if type is content_tuple and type.content|length > 1 + return frozen_list_check_complete_tuple(state,TUPLE_ITEM_FIELD_COUNT__{$ type $},tuple_item_field_names__{$ type $},&n->base); +//% else + return 0; +//% endif +} +//% endif +//% endif +//% elif type is enumeration_t +static const char *enum_value_str__{$ type $}[] = { +//% for value in type.children + "{$ value.xml $}"{$ ',' if not loop.last $} +//% endfor +}; +//% if type.any_renamed +static const char *enum_id_str__{$ type $}[] = { +//% for value in type.children + "{$ value.id $}"{$ ',' if not loop.last $} +//% endfor +}; +//% endif + +{$ hash_lookup('enum_value_lookup__'~type,type.hash,'ENUM_VALUE_COUNT__'~type,'enum_value_str__'~type,'int',-1) $} + +static PyObject *parse__{$ type $}(parse_state *state,const char *str) { + /* TODO: ignore whitespace */ + PyObject *r; + int i = enum_value_lookup__{$ type $}(str); + + if(i < 0) { + raise_invalid_enum_error(state,str); + return NULL; + } + r = state->py->enum_values__{$ type $}[i]; + Py_INCREF(r); + return r; +} +//% elif type is char_enum_t +static PyObject *parse__{$ type $}(parse_state *state,const char *str) { + /* TODO: ignore whitespace */ + PyObject *r; + if(str[0] == '\0' || str[1] != '\0') { + set_parse_error(state,"value must be a single character"); + return NULL; + } + switch(*str) { +//% for c in type.values + case '{$ c $}': + r = state->py->char_objects[ENUM_CHAR__{$ c $}]; + break; +//% endfor + default: + raise_invalid_char_enum_error(state,*str,"{$ type.values $}"); + return NULL; + } + Py_INCREF(r); + return r; +} +//% endif +//% endfor + +static int toplevel_start(parse_state *state,const XML_Char *child_name,const XML_Char **attr) { + tagged_value *tv; + parse_callbacks *cb = top_callbacks(state); + + switch(element_lookup(child_name)) { +//% for name,type in root_elements + 
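+ /* each recognized root element becomes a TaggedValue of (tag name, parsed node) stored in the result */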
case ELEMENT__{$ name $}: + if(*cb->value != NULL) set_parse_error(state,"cannot have more than one root element"); + tv = create_tagged_value(state->py); + if(tv == NULL) return -1; + *cb->value = (PyObject*)tv; + tv->values[0] = state->py->tag_names[TAGGED_UNION_NAME__{$ name $}]; + Py_INCREF(tv->values[0]); + return node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ type $}(state,tv->values + 1,attr); +//% endfor + default: + return 0; + } +} + +typedef struct { + PyType_Spec spec; + unsigned char list_base; +} spec_and_is_list; + +static spec_and_is_list class_specs[] = { + {{FULL_MODULE_STR ".FrozenList",sizeof(frozen_list),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,frozen_list_slots},0}, + {{FULL_MODULE_STR ".TaggedValue",sizeof(tagged_value),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,tagged_value_slots},0}, +//% for type in types|select('used_directly') + {{FULL_MODULE_STR ".Node_{$ type $}",offsetof(node_{$ common_affix(type) $}_common,fields){% if type is has_fields %} + sizeof(PyObject*)*FIELD_COUNT__{$ type $}{% endif %},0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,node_class_slots__{$ type $}},{$ '1' if type is list_e else '0' $}}, +//% endfor +//% for type in types|select('content_tuple') + {{FULL_MODULE_STR ".ListItem_{$ type $}",offsetof(tuple_item,fields) + sizeof(PyObject*)*TUPLE_ITEM_FIELD_COUNT__{$ type $},0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,tuple_item_slots__{$ type $}},0}, +//% endfor +}; + +static void raise_expat_error(parse_state *state) { + if(!PyErr_Occurred()) { + PyErr_Format( + state->py->parse_error_exc_type, + "Error on line %i: %s", + XML_GetErrorLineNumber(state->parser), + XML_ErrorString(XML_GetErrorCode(state->parser))); + } +} + +static int call_read(void *buf,int buf_size,PyObject *read,PyObject *buf_arg) { + PyObject *str; + Py_ssize_t len; + const char *ptr; + + str = PyObject_CallObject(read,buf_arg); + if(str == NULL) return -1; + + if(PyBytes_Check(str)) { + char *tmp; + if(PyBytes_AsStringAndSize(str,&tmp,&len) < 0) goto error; + ptr = tmp; + } else if(PyByteArray_Check(str)) { + ptr = PyByteArray_AsString(str); + assert(ptr); + len = PyByteArray_Size(str); + } else { + PyErr_SetString(PyExc_TypeError,"read() did not return a bytes object"); + goto error; + } + + if(len > buf_size) { + PyErr_Format( + PyExc_ValueError, + "read() returned too much data: %i bytes requested, %zd returned", + buf_size, + len); + goto error; + } + memcpy(buf,ptr,len); + Py_DECREF(str); + return (int)len; + +error: + Py_DECREF(str); + return -1; +} + +static int parse_file(parse_state *state,PyObject *file) { + enum XML_Status status; + int r = -1; + int bytes_read; + void *buf; + PyObject *read = NULL; + PyObject *buf_arg; + PyObject *tmp; + + buf_arg = PyTuple_New(1); + if(buf_arg == NULL) return -1; + + tmp = PyLong_FromLong(EXPAT_BUFFER_SIZE); + if(tmp == NULL) goto end; + PyTuple_SetItem(buf_arg,0,tmp); + + read = PyObject_GetAttrString(file,"read"); + if(read == NULL) goto end; + + do { + buf = XML_GetBuffer(state->parser,EXPAT_BUFFER_SIZE); + if(buf == NULL) { + raise_expat_error(state); + goto end; + } + + bytes_read = call_read(buf,EXPAT_BUFFER_SIZE,read,buf_arg); + if(bytes_read < 0) goto end; + status = XML_ParseBuffer(state->parser,bytes_read,bytes_read == 0); + if(status == XML_STATUS_ERROR) { + raise_expat_error(state); + goto end; + } + } while(bytes_read); + + r = 0; + + end: + Py_XDECREF(read); + Py_DECREF(buf_arg); + return r; +} + +static int parse_str(parse_state *state,PyObject 
*data) {
+ int r = 0;
+ const char *s;
+ Py_ssize_t len;
+ PyObject *tmp = NULL;
+
+ if(PyUnicode_Check(data)) {
+ char *s_tmp;
+ /* parse the UTF-8 encoding of the string; "tmp" keeps the bytes object alive until parsing finishes */
+ tmp = PyUnicode_AsUTF8String(data);
+ if(tmp == NULL) return -1;
+ if(PyBytes_AsStringAndSize(tmp,&s_tmp,&len) < 0) {
+ Py_DECREF(tmp);
+ return -1;
+ }
+ s = s_tmp;
+ XML_SetEncoding(state->parser,"utf-8");
+ } else if(PyBytes_Check(data)) {
+ char *s_tmp;
+ if(PyBytes_AsStringAndSize(data,&s_tmp,&len) < 0) return -1;
+ s = s_tmp;
+ } else if(PyByteArray_Check(data)) {
+ s = PyByteArray_AsString(data);
+ assert(s);
+ len = PyByteArray_Size(data);
+ } else {
+ PyErr_SetString(PyExc_TypeError,"argument to \"parse_str\" must be an instance of str, bytes or bytearray");
+ return -1;
+ }
+
+ while(len > EXPAT_CHUNK_SIZE) {
+ if(XML_Parse(state->parser,s,EXPAT_CHUNK_SIZE,0) == XML_STATUS_ERROR) {
+ raise_expat_error(state);
+ r = -1;
+ goto end;
+ }
+
+ s += EXPAT_CHUNK_SIZE;
+ len -= EXPAT_CHUNK_SIZE;
+ }
+
+ if(XML_Parse(state->parser,s,(int)len,1) == XML_STATUS_ERROR) {
+ raise_expat_error(state);
+ r = -1;
+ goto end;
+ }
+
+ end:
+ Py_XDECREF(tmp);
+ return r;
+}
+
+typedef enum {
+ EXPAT_SOURCE_STR,
+ EXPAT_SOURCE_FILE
+} expat_source;
+
+static PyObject *parse(PyObject *module,expat_source source,PyObject *obj) {
+ callback_stack_block *next, *tmp;
+ parse_callbacks *cb;
+ int r = -1;
+ PyObject *r_obj = NULL;
+ parse_state state = {NULL};
+
+ state.block = PyMem_Malloc(sizeof(callback_stack_block));
+ if(state.block == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+ state.block->prev = NULL;
+ state.block->next = NULL;
+ state.py = PyModule_GetState(module);
+ assert(state.py != NULL);
+
+ cb = push_callbacks(&state);
+ if(!cb) goto end;
+ cb->value = &r_obj;
+ cb->cs_call = toplevel_start;
+
+ state.parser = XML_ParserCreate(NULL);
+ XML_SetElementHandler(state.parser,start_element,end_element);
+ XML_SetCharacterDataHandler(state.parser,character_data);
+ XML_SetUserData(state.parser,&state);
+
+ if(source == EXPAT_SOURCE_STR) r = parse_str(&state,obj);
+ else r = parse_file(&state,obj);
+
+ XML_ParserFree(state.parser);
+
+ end:
+ while(state.block->prev != NULL) state.block = state.block->prev;
+ next = state.block;
+ while(next) {
+ tmp = next;
+ next = next->next;
+ PyMem_Free(tmp);
+ }
+
+ if(r) {
+ Py_XDECREF(r_obj);
+ return NULL;
+ }
+
+ if(r_obj == NULL) {
+ PyErr_SetString(state.py->parse_error_exc_type,"document without a recognized root element");
+ }
+
+ return r_obj;
+}
+
+static PyObject *impl_parse_str(PyObject *module,PyObject *str) {
+ return parse(module,EXPAT_SOURCE_STR,str);
+}
+static PyObject *impl_parse_file(PyObject *module,PyObject *file) {
+ return parse(module,EXPAT_SOURCE_FILE,file);
+}
+
+static PyMethodDef module_functions[] = {
+ {"parse_str",impl_parse_str,METH_O,NULL},
+ {"parse_file",impl_parse_file,METH_O,NULL},
+ {NULL}};
+
+static PyObject *load_enum_base(void) {
+ PyObject *cls;
+ PyObject *enum_m = PyImport_ImportModule("enum");
+ if(enum_m == NULL) return NULL;
+ cls = PyObject_GetAttrString(enum_m,"Enum");
+ Py_DECREF(enum_m);
+ return cls;
+}
+
+static void decref_array(PyObject **objs,size_t size) {
+ while(size-- > 0) Py_DECREF(objs[size]);
+}
+
+static int create_enum(
+ PyObject **dest,
+ PyObject *module,
+ PyObject *base,
+ const char *enum_name,
+ const char **py_names,
+ const char **xml_names,
+ size_t size)
+{
+ PyObject **str_objs;
+ PyObject *members = NULL;
+ PyObject *e = NULL;
+ size_t str_i=0;
+ size_t e_i=0;
+ size_t i;
+ int r = -1;
+
+ str_objs = PyMem_Malloc(sizeof(PyObject*) * size);
+ if(str_objs == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ for(; str_iparse_error_exc_type = PyErr_NewException(FULL_MODULE_STR ".ParseError",PyExc_RuntimeError,NULL);
+ if(PyModule_AddObject(module,"ParseError",state->parse_error_exc_type)) goto error;
+ Py_INCREF(state->parse_error_exc_type);
+
+ state->parse_warn_exc_type = PyErr_NewException(FULL_MODULE_STR ".ParseWarning",PyExc_UserWarning,NULL);
+ if(PyModule_AddObject(module,"ParseWarning",state->parse_warn_exc_type)) goto error;
+ Py_INCREF(state->parse_warn_exc_type);
+
+ for(; tu_i<TAGGED_UNION_NAME_COUNT; ++tu_i) {
+ state->tag_names[tu_i] = PyUnicode_FromString(tagged_union_names[tu_i]);
+ if(state->tag_names[tu_i] == NULL) goto error;
+ }
+
+//% if char_enum_chars
+ for(; char_i<ENUM_CHAR_COUNT; ++char_i) {
+ state->char_objects[char_i] = PyUnicode_FromStringAndSize(&enum_chars[char_i],1);
+ if(state->char_objects[char_i] == NULL) goto error;
+ }
+//% endif
+
+ enum_base = load_enum_base();
+ if(enum_base == NULL) goto error;
+//% for type in types|select('enumeration_t')
+ if(create_enum(
+ state->enum_values__{$ type $},
+ module,
+ enum_base,
+ FULL_MODULE_STR ".{$ type $}",
+ enum_{$ 'id' if type.any_renamed else 'value' $}_str__{$ type $},
+ enum_value_str__{$ type $},
+ ENUM_VALUE_COUNT__{$ type $})) goto error;
+//% endfor
+ Py_DECREF(enum_base);
+ enum_base = NULL;
+
+ for(; i<CLASS_COUNT; ++i) {
+ if(class_specs[i].list_base) {
+ PyObject *bases = PyTuple_New(1);
+ if(bases == NULL) goto error;
+ PyTuple_SET_ITEM(bases,0,(PyObject*)state->classes[CLASS_FROZEN_LIST]);
+ Py_INCREF(state->classes[CLASS_FROZEN_LIST]);
+ state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,bases);
+ Py_DECREF(bases);
+ } else {
+ state->classes[i] = (PyTypeObject*)PyType_FromSpec(&class_specs[i].spec);
+ }
+ if(state->classes[i] == NULL) goto error;
+ if(PyModule_AddObject(module,class_specs[i].spec.name + sizeof(FULL_MODULE_STR),(PyObject*)state->classes[i])) {
+ ++i;
+ goto error;
+ };
+ Py_INCREF(state->classes[i]);
+ }
+
+ return 0;
+
+ error:
+ Py_XDECREF(enum_base);
+//% for type in types|select('enumeration_t')
+ if(state->enum_values__{$ type $}[0] != NULL) decref_array(state->enum_values__{$ type $},ENUM_VALUE_COUNT__{$ type $});
+//% endfor
+ decref_array((PyObject**)state->classes,i);
+ decref_array(state->tag_names,tu_i);
+ decref_array(state->char_objects,char_i);
+ Py_XDECREF(state->parse_warn_exc_type);
+ Py_DECREF(state->parse_error_exc_type);
+ state->parse_error_exc_type = NULL;
+ return -1;
+}
+
+static int module_traverse(PyObject *module,visitproc visit,void *arg) {
+ int r;
+ module_state *state = PyModule_GetState(module);
+ if(state == NULL) return 0;
+
+//% for type in types|select('enumeration_t')
+ r = visit_array(state->enum_values__{$ type $},ENUM_VALUE_COUNT__{$ type $},visit,arg);
+ if(r) return r;
+//% endfor
+ r = visit_array((PyObject**)state->classes,CLASS_COUNT,visit,arg);
+ if(r) return r;
+ r = visit(state->parse_warn_exc_type,arg);
+ if(r) return r;
+ return visit(state->parse_error_exc_type,arg);
+}
+
+static void module_free(void *module) {
+ module_state *state = PyModule_GetState(module);
+ if(state->parse_error_exc_type == NULL) return;
+
+//% for type in types|select('enumeration_t')
+ decref_array(state->enum_values__{$ type $},ENUM_VALUE_COUNT__{$ type $});
+//% endfor
+ decref_array((PyObject**)state->classes,CLASS_COUNT);
+ decref_array(state->tag_names,TAGGED_UNION_NAME_COUNT);
+ decref_array(state->char_objects,ENUM_CHAR_COUNT);
+ Py_DECREF(state->parse_warn_exc_type);
+ Py_DECREF(state->parse_error_exc_type);
+}
+
+/* Python 3.7 doesn't offer a way to get per-module state if multi-phase
+initialization is used, so for now, single-phase initialization is used.
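+The m_slots definition below is kept (commented out) so that multi-phase
+initialization can be restored once the minimum supported Python version
+allows per-module state to be retrieved under it.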
+ +static PyModuleDef_Slot m_slots[] = { + {Py_mod_exec,module_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 + {Py_mod_multiple_interpreters,Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, +#endif + {0,NULL}}; +*/ + +static PyModuleDef module_def = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = FULL_MODULE_STR, + .m_size = sizeof(module_state), + .m_methods = module_functions, + /*.m_slots = m_slots,*/ + .m_traverse = module_traverse, + .m_free = module_free}; + +PyMODINIT_FUNC MAKE_INIT_FUN_NAME(MODULE_NAME)(void) { + /*return PyModuleDef_Init(&module_def);*/ + PyObject *r = PyModule_Create(&module_def); + if(module_exec(r)) { + Py_DECREF(r); + return NULL; + } + return r; +} diff --git a/xml_parser_generator/schema.json b/xml_parser_generator/schema.json new file mode 100644 index 00000000..702854c0 --- /dev/null +++ b/xml_parser_generator/schema.json @@ -0,0 +1,1431 @@ +{ + "roots": { + "doxygen": "DoxygenType", + "doxygenindex": "DoxygenTypeIndex" + }, + "types": { + "DoxygenTypeIndex": { + "kind": "tag_only_element", + "attributes": { + "lang": {"type": "#string"}, + "version": {"type": "#string"} + }, + "other_attr": "ignore", + "children": { + "compound": {"type": "CompoundType", "is_list": true, "min_items": 0} + } + }, + "CompoundType": { + "kind": "tag_only_element", + "attributes": { + "refid": {"type": "#string"}, + "kind": {"type": "CompoundKind"} + }, + "children": { + "name": {"type": "#string"}, + "member": {"type": "MemberType", "is_list": true, "min_items": 0} + } + }, + "DoxygenType": { + "kind": "tag_only_element", + "attributes": { + "lang": {"type": "#string"}, + "version": {"type": "#string"} + }, + "other_attr": "ignore", + "children": { + "compounddef": {"type": "compounddefType", "is_list": true, "min_items": 0} + } + }, + "compounddefType": { + "kind": "tag_only_element", + "attributes": { + "abstract": {"type": "#DoxBool", "optional": true}, + "final": {"type": "#DoxBool", "optional": true}, + "id": {"type": "#string"}, + "inline": {"type": "#DoxBool", "optional": true}, + "kind": {"type": "DoxCompoundKind"}, + "language": {"type": "DoxLanguage", "optional": true}, + "prot": {"type": "DoxProtectionKind"}, + "sealed": {"type": "#DoxBool", "optional": true} + }, + "children": { + "basecompoundref": {"type": "compoundRefType", "is_list": true, "min_items": 0}, + "briefdescription": {"type": "descriptionType", "min_items": 0}, + "collaborationgraph": {"type": "graphType", "min_items": 0}, + "compoundname": {"type": "#string"}, + "derivedcompoundref": {"type": "compoundRefType", "is_list": true, "min_items": 0}, + "detaileddescription": {"type": "descriptionType", "min_items": 0}, + "exports": {"type": "exportsType", "min_items": 0}, + "incdepgraph": {"type": "graphType", "min_items": 0}, + "includedby": {"type": "incType", "is_list": true, "min_items": 0}, + "includes": {"type": "incType", "is_list": true, "min_items": 0}, + "inheritancegraph": {"type": "graphType", "min_items": 0}, + "initializer": {"type": "linkedTextType", "min_items": 0}, + "innerclass": {"type": "refType", "is_list": true, "min_items": 0}, + "innerconcept": {"type": "refType", "is_list": true, "min_items": 0}, + "innerdir": {"type": "refType", "is_list": true, "min_items": 0}, + "innerfile": {"type": "refType", "is_list": true, "min_items": 0}, + "innergroup": {"type": "refType", "is_list": true, "min_items": 0}, + "innermodule": {"type": "refType", "is_list": true, "min_items": 0}, + "innernamespace": {"type": "refType", "is_list": true, "min_items": 0}, + "innerpage": {"type": "refType", "is_list": true, 
"min_items": 0}, + "invincdepgraph": {"type": "graphType", "min_items": 0}, + "listofallmembers": {"type": "listofallmembersType", "min_items": 0}, + "location": {"type": "locationType", "min_items": 0}, + "programlisting": {"type": "listingType", "min_items": 0}, + "qualifier": {"type": "#string", "is_list": true, "min_items": 0}, + "requiresclause": {"type": "linkedTextType", "min_items": 0}, + "sectiondef": {"type": "sectiondefType", "is_list": true, "min_items": 0}, + "tableofcontents": {"type": "tableofcontentsType", "min_items": 0}, + "templateparamlist": {"type": "templateparamlistType", "min_items": 0}, + "title": {"type": "#string", "min_items": 0} + } + }, + "graphType": { + "kind": "tag_only_element", + "children": { + "node": {"type": "nodeType", "is_list": true, "min_items": 1} + } + }, + "templateparamlistType": { + "kind": "tag_only_element", + "children": { + "param": {"type": "paramType", "is_list": true, "min_items": 0} + } + }, + "sectiondefType": { + "kind": "tag_only_element", + "attributes": { + "kind": {"type": "DoxSectionKind"} + }, + "children": { + "description": {"type": "descriptionType", "min_items": 0}, + "header": {"type": "#string", "min_items": 0}, + "member": {"type": "MemberType", "is_list": true, "min_items": 0}, + "memberdef": {"type": "memberdefType", "is_list": true, "min_items": 0} + } + }, + "tableofcontentsType": { + "kind": "tag_only_element", + "children": { + "tocsect": {"type": "tableofcontentsKindType", "is_list": true, "min_items": 1} + } + }, + "linkedTextType": { + "kind": "union_list_element", + "allow_text": true, + "content": { + "ref": "refTextType" + } + }, + "descriptionType": { + "kind": "union_list_element", + "allow_text": true, + "children": { + "title": {"type": "#string", "min_items": 0} + }, + "content": { + "internal": "docInternalType", + "para": "#string", + "sect1": "docSect1Type" + } + }, + "exportsType": { + "kind": "tag_only_element", + "children": { + "export": {"type": "exportType", "is_list": true, "min_items": 1} + } + }, + "listingType": { + "kind": "tag_only_element", + "attributes": { + "filename": {"type": "#string"} + }, + "children": { + "codeline": {"type": "codelineType", "is_list": true, "min_items": 0} + } + }, + "locationType": { + "kind": "tag_only_element", + "attributes": { + "bodyend": {"type": "#integer"}, + "bodyfile": {"type": "#string"}, + "bodystart": {"type": "#integer"}, + "column": {"type": "#integer"}, + "declcolumn": {"type": "#integer"}, + "declfile": {"type": "#string"}, + "declline": {"type": "#integer"}, + "file": {"type": "#string"}, + "line": {"type": "#integer"} + } + }, + "listofallmembersType": { + "kind": "tag_only_element", + "children": { + "member": {"type": "memberRefType", "is_list": true, "min_items": 0} + } + }, + "memberRefType": { + "kind": "tag_only_element", + "attributes": { + "ambiguityscope": {"type": "#string"}, + "prot": {"type": "DoxProtectionKind"}, + "refid": {"type": "#string"}, + "virt": {"type": "DoxVirtualKind"} + }, + "children": { + "name": {"type": "#string"}, + "scope": {"type": "#string"} + } + }, + "memberdefType": { + "kind": "tag_only_element", + "attributes": { + "accessor": {"type": "DoxAccessor", "optional": true}, + "add": {"type": "#DoxBool", "optional": true}, + "attribute": {"type": "#DoxBool", "optional": true}, + "bound": {"type": "#DoxBool", "optional": true}, + "const": {"type": "#DoxBool", "optional": true}, + "constexpr": {"type": "#DoxBool", "optional": true}, + "constrained": {"type": "#DoxBool", "optional": true}, + "explicit": {"type": 
"#DoxBool", "optional": true}, + "extern": {"type": "#DoxBool", "optional": true}, + "final": {"type": "#DoxBool", "optional": true}, + "gettable": {"type": "#DoxBool", "optional": true}, + "id": {"type": "#string"}, + "initonly": {"type": "#DoxBool", "optional": true}, + "inline": {"type": "#DoxBool", "optional": true}, + "kind": {"type": "DoxMemberKind"}, + "maybeambiguous": {"type": "#DoxBool", "optional": true}, + "maybedefault": {"type": "#DoxBool", "optional": true}, + "maybevoid": {"type": "#DoxBool", "optional": true}, + "mutable": {"type": "#DoxBool", "optional": true}, + "new": {"type": "#DoxBool", "optional": true}, + "noexcept": {"type": "#DoxBool", "optional": true}, + "optional": {"type": "#DoxBool", "optional": true}, + "privategettable": {"type": "#DoxBool", "optional": true}, + "privatesettable": {"type": "#DoxBool", "optional": true}, + "property": {"type": "#DoxBool", "optional": true}, + "prot": {"type": "DoxProtectionKind"}, + "protectedgettable": {"type": "#DoxBool", "optional": true}, + "protectedsettable": {"type": "#DoxBool", "optional": true}, + "raise": {"type": "#DoxBool", "optional": true, "py_name": "raise_"}, + "readable": {"type": "#DoxBool", "optional": true}, + "readonly": {"type": "#DoxBool", "optional": true}, + "refqual": {"type": "DoxRefQualifierKind", "optional": true}, + "removable": {"type": "#DoxBool", "optional": true}, + "remove": {"type": "#DoxBool", "optional": true}, + "required": {"type": "#DoxBool", "optional": true}, + "sealed": {"type": "#DoxBool", "optional": true}, + "settable": {"type": "#DoxBool", "optional": true}, + "static": {"type": "#DoxBool"}, + "strong": {"type": "#DoxBool", "optional": true}, + "transient": {"type": "#DoxBool", "optional": true}, + "virt": {"type": "DoxVirtualKind", "optional": true}, + "volatile": {"type": "#DoxBool", "optional": true}, + "writable": {"type": "#DoxBool", "optional": true} + }, + "children": { + "argsstring": {"type": "#string", "min_items": 0}, + "bitfield": {"type": "#string", "min_items": 0}, + "briefdescription": {"type": "descriptionType", "min_items": 0}, + "definition": {"type": "#string", "min_items": 0}, + "detaileddescription": {"type": "descriptionType", "min_items": 0}, + "enumvalue": {"type": "enumvalueType", "is_list": true, "min_items": 0}, + "exceptions": {"type": "linkedTextType", "min_items": 0}, + "inbodydescription": {"type": "descriptionType", "min_items": 0}, + "initializer": {"type": "linkedTextType", "min_items": 0}, + "location": {"type": "locationType"}, + "name": {"type": "#string"}, + "param": {"type": "paramType", "is_list": true, "min_items": 0}, + "qualifiedname": {"type": "#string", "min_items": 0}, + "qualifier": {"type": "#string", "is_list": true, "min_items": 0}, + "read": {"type": "#string", "min_items": 0}, + "referencedby": {"type": "referenceType", "is_list": true, "min_items": 0}, + "references": {"type": "referenceType", "is_list": true, "min_items": 0}, + "reimplementedby": {"type": "reimplementType", "is_list": true, "min_items": 0}, + "reimplements": {"type": "reimplementType", "is_list": true, "min_items": 0}, + "requiresclause": {"type": "linkedTextType", "min_items": 0}, + "templateparamlist": {"type": "templateparamlistType", "min_items": 0}, + "type": {"type": "linkedTextType", "min_items": 0}, + "write": {"type": "#string", "min_items": 0} + } + }, + "MemberType": { + "kind": "tag_only_element", + "attributes": { + "kind": {"type": "MemberKind"}, + "refid": {"type": "#string"} + }, + "children": { + "name": {"type": "#string"} + } + }, + 
"paramType": { + "kind": "tag_only_element", + "children": { + "array": {"type": "#string", "min_items": 0}, + "attributes": {"type": "#string", "min_items": 0}, + "briefdescription": {"type": "descriptionType", "min_items": 0}, + "declname": {"type": "#string", "min_items": 0}, + "defname": {"type": "#string", "min_items": 0}, + "defval": {"type": "linkedTextType", "min_items": 0}, + "type": {"type": "linkedTextType", "min_items": 0}, + "typeconstraint": {"type": "linkedTextType", "min_items": 0} + } + }, + "enumvalueType": { + "kind": "tag_only_element", + "attributes": { + "id": {"type": "#string"}, + "prot": {"type": "DoxProtectionKind"} + }, + "children": { + "briefdescription": {"type": "descriptionType", "min_items": 0}, + "detaileddescription": {"type": "descriptionType", "min_items": 0}, + "initializer": {"type": "linkedTextType", "min_items": 0}, + "name": {"type": "#string"} + } + }, + "referenceType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "compoundref": {"type": "#string"}, + "endline": {"type": "#integer"}, + "refid": {"type": "#string"}, + "startline": {"type": "#integer"} + } + }, + "docInternalType": { + "kind": "union_list_element", + "allow_text": true, + "content": { + "para": "#string", + "sect1": "docSect1Type" + } + }, + "docSect1Type": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "id": {"type": "#string"} + }, + "children": { + "title": {"type": "#string", "min_items": 0} + }, + "content": { + "para": "docParaType", + "sect4": "docSect2Type", + "internal": "docInternalS1Type" + } + }, + "nodeType": { + "kind": "tag_only_element", + "attributes": { + "id": {"type": "#string"} + }, + "children": { + "childnode": {"type": "childnodeType", "is_list": true, "min_items": 0}, + "label": {"type": "#string"}, + "link": {"type": "linkType", "min_items": 0} + } + }, + "linkType": { + "kind": "tag_only_element", + "attributes": { + "external": {"type": "#string"}, + "refid": {"type": "#string"} + } + }, + "childnodeType": { + "kind": "tag_only_element", + "attributes": { + "refid": {"type": "#string"}, + "relation": {"type": "DoxGraphRelation"} + }, + "children": { + "edgelabel": {"type": "#string", "is_list": true, "min_items": 0} + } + }, + "codelineType": { + "kind": "tag_only_element", + "attributes": { + "external": {"type": "#DoxBool"}, + "lineno": {"type": "#integer"}, + "refid": {"type": "#string"}, + "refkind": {"type": "DoxRefKind"} + }, + "children": { + "highlight": {"type": "highlightType", "is_list": true, "min_items": 0} + } + }, + "highlightType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "class": {"type": "DoxHighlightClass", "py_name": "class_"} + }, + "content": { + "sp": "#spType", + "ref": "refTextType" + } + }, + "docInternalS1Type": { + "kind": "union_list_element", + "allow_text": true, + "content": { + "para": "#string", + "sect2": "docSect2Type" + } + }, + "docSect2Type": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "id": {"type": "#string"} + }, + "children": { + "title": {"type": "#string"} + }, + "content": { + "para": "docParaType", + "sect3": "docSect3Type", + "internal": "docInternalS2Type" + } + }, + "docSect3Type": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "id": {"type": "#string"} + }, + "children": { + "title": {"type": "#string"} + }, + "content": { + "para": "docParaType", + "sect4": "docSect4Type", + "internal": "docInternalS3Type" + } + }, + "docInternalS2Type": { + "kind": 
"union_list_element", + "allow_text": true, + "content": { + "para": "#string", + "sect3": "docSect3Type" + } + }, + "docSect4Type": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "id": {"type": "#string"} + }, + "children": { + "title": {"type": "#string"} + }, + "content": { + "para": "docParaType", + "internal": "docInternalS4Type" + } + }, + "docInternalS3Type": { + "kind": "union_list_element", + "allow_text": true, + "content": { + "para": "#string", + "sect3": "docSect4Type" + } + }, + "docInternalS4Type": { + "kind": "union_list_element", + "allow_text": true, + "content": { + "para": "#string" + } + }, + "docListItemType": { + "kind": "list_element", + "attributes": { + "value": {"type": "#integer", "optional": true} + }, + "content": { + "para": "docParaType" + } + }, + "docCaptionType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "id": {"type": "#string"} + } + }, + "docRowType": { + "kind": "tag_only_element", + "children": { + "entry": {"type": "docEntryType", "is_list": true, "min_items": 0} + } + }, + "docEntryType": { + "kind": "tag_only_element", + "any_attr": true, + "attributes": { + "align": {"type": "DoxAlign"}, + "class": {"type": "#string", "py_name": "class_"}, + "colspan": {"type": "#integer"}, + "rowspan": {"type": "#integer"}, + "thead": {"type": "#DoxBool"}, + "valign": {"type": "DoxVerticalAlign"}, + "width": {"type": "#string"} + }, + "children": { + "para": {"type": "docParaType", "is_list": true, "min_items": 0} + } + }, + "docTocItemType": { + "kind": "union_list_element","allow_text": true, + "attributes": { + "id": {"type": "#string"} + } + }, + "docParamListItem": { + "kind": "tag_only_element", + "children": { + "parameterdescription": {"type": "descriptionType"}, + "parameternamelist": {"type": "docParamNameList", "is_list": true, "min_items": 0} + } + }, + "docParamNameList": { + "kind": "tag_only_element", + "children": { + "parametername": {"type": "docParamName", "is_list": true, "min_items": 0}, + "parametertype": {"type": "docParamType", "is_list": true, "min_items": 0} + } + }, + "docParamType": { + "kind": "union_list_element", + "allow_text": true, + "content": { + "ref": "refTextType" + } + }, + "docParamName": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "direction": {"type": "DoxParamDir"} + }, + "content": { + "ref": "refTextType" + } + }, + "tableofcontentsKindType": { + "kind": "tag_only_element", + "children": { + "name": {"type": "#string"}, + "reference": {"type": "#string"}, + "tableofcontents": {"type": "tableofcontentsType", "is_list": true, "min_items": 0} + } + }, + "incType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "refid": {"type": "#string"}, + "local": {"type": "#DoxBool"} + } + }, + "compoundRefType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "refid": {"type": "#string", "optional": true}, + "prot": {"type": "DoxProtectionKind"}, + "virt": {"type": "DoxVirtualKind"} + } + }, + "refType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "refid": {"type": "#string"}, + "prot": {"type": "DoxProtectionKind", "optional": true}, + "inline": {"type": "#DoxBool", "optional": true} + } + }, + "exportType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "refid": {"type": "#string", "optional": true} + } + }, + "refTextType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "refid": {"type": 
"#string"}, + "kindref": {"type": "DoxRefKind"}, + "external": {"type": "#string", "optional": true}, + "tooltip": {"type": "#string", "optional": true} + } + }, + "reimplementType": { + "kind": "union_list_element","allow_text": true, + "attributes": { + "refid": {"type": "#string"} + } + }, + "DoxRefKind": { + "kind": "enumeration", + "values": ["compound", "member"] + }, + "MemberKind": { + "kind": "enumeration", + "values": [ + "define", + "property", + "event", + "variable", + "typedef", + "enum", + "enumvalue", + "function", + "signal", + "prototype", + "friend", + "dcop", + "slot" + ] + }, + "DoxMemberKind": { + "kind": "enumeration", + "values": [ + "define", + "property", + "event", + "variable", + "typedef", + "enum", + "function", + "signal", + "prototype", + "friend", + "dcop", + "slot", + "interface", + "service" + ] + }, + "docTitleCmdGroup": { + "kind": "union_list_element","allow_text": true, + "content": { + "ulink": "docURLLink", + "bold": "docMarkupType", + "s": "docMarkupType", + "strike": "docMarkupType", + "underline": "docMarkupType", + "emphasis": "docMarkupType", + "computeroutput": "docMarkupType", + "subscript": "docMarkupType", + "superscript": "docMarkupType", + "center": "docMarkupType", + "small": "docMarkupType", + "cite": "docMarkupType", + "del": "docMarkupType", + "ins": "docMarkupType", + "htmlonly": "docHtmlOnlyType", + "manonly": "#string", + "xmlonly": "#string", + "rtfonly": "#string", + "latexonly": "#string", + "docbookonly": "#string", + "image": "docImageType", + "dot": "docDotMscType", + "msc": "docDotMscType", + "plantuml": "docPlantumlType", + "anchor": "docAnchorType", + "formula": "docFormulaType", + "ref": "docRefTextType", + "emoji": "docEmojiType", + "linebreak": "#empty", + "nonbreakablespace": "#empty", + "iexcl": "#empty", + "cent": "#empty", + "pound": "#empty", + "curren": "#empty", + "yen": "#empty", + "brvbar": "#empty", + "sect": "#empty", + "umlaut": "#empty", + "copy": "#empty", + "ordf": "#empty", + "laquo": "#empty", + "not": "#empty", + "shy": "#empty", + "registered": "#empty", + "macr": "#empty", + "deg": "#empty", + "plusmn": "#empty", + "sup2": "#empty", + "sup3": "#empty", + "acute": "#empty", + "micro": "#empty", + "para": "#empty", + "middot": "#empty", + "cedil": "#empty", + "sup1": "#empty", + "ordm": "#empty", + "raquo": "#empty", + "frac14": "#empty", + "frac12": "#empty", + "frac34": "#empty", + "iquest": "#empty", + "Agrave": "#empty", + "Aacute": "#empty", + "Acirc": "#empty", + "Atilde": "#empty", + "Aumlaut": "#empty", + "Aring": "#empty", + "AElig": "#empty", + "Ccedil": "#empty", + "Egrave": "#empty", + "Eacute": "#empty", + "Ecirc": "#empty", + "Eumlaut": "#empty", + "Igrave": "#empty", + "Iacute": "#empty", + "Icirc": "#empty", + "Iumlaut": "#empty", + "ETH": "#empty", + "Ntilde": "#empty", + "Ograve": "#empty", + "Oacute": "#empty", + "Ocirc": "#empty", + "Otilde": "#empty", + "Oumlaut": "#empty", + "times": "#empty", + "Oslash": "#empty", + "Ugrave": "#empty", + "Uacute": "#empty", + "Ucirc": "#empty", + "Uumlaut": "#empty", + "Yacute": "#empty", + "THORN": "#empty", + "szlig": "#empty", + "agrave": "#empty", + "aacute": "#empty", + "acirc": "#empty", + "atilde": "#empty", + "aumlaut": "#empty", + "aring": "#empty", + "aelig": "#empty", + "ccedil": "#empty", + "egrave": "#empty", + "eacute": "#empty", + "ecirc": "#empty", + "eumlaut": "#empty", + "igrave": "#empty", + "iacute": "#empty", + "icirc": "#empty", + "iumlaut": "#empty", + "eth": "#empty", + "ntilde": "#empty", + "ograve": "#empty", + "oacute": 
"#empty", + "ocirc": "#empty", + "otilde": "#empty", + "oumlaut": "#empty", + "divide": "#empty", + "oslash": "#empty", + "ugrave": "#empty", + "uacute": "#empty", + "ucirc": "#empty", + "uumlaut": "#empty", + "yacute": "#empty", + "thorn": "#empty", + "yumlaut": "#empty", + "fnof": "#empty", + "Alpha": "#empty", + "Beta": "#empty", + "Gamma": "#empty", + "Delta": "#empty", + "Epsilon": "#empty", + "Zeta": "#empty", + "Eta": "#empty", + "Theta": "#empty", + "Iota": "#empty", + "Kappa": "#empty", + "Lambda": "#empty", + "Mu": "#empty", + "Nu": "#empty", + "Xi": "#empty", + "Omicron": "#empty", + "Pi": "#empty", + "Rho": "#empty", + "Sigma": "#empty", + "Tau": "#empty", + "Upsilon": "#empty", + "Phi": "#empty", + "Chi": "#empty", + "Psi": "#empty", + "Omega": "#empty", + "alpha": "#empty", + "beta": "#empty", + "gamma": "#empty", + "delta": "#empty", + "epsilon": "#empty", + "zeta": "#empty", + "eta": "#empty", + "theta": "#empty", + "iota": "#empty", + "kappa": "#empty", + "lambda": "#empty", + "mu": "#empty", + "nu": "#empty", + "xi": "#empty", + "omicron": "#empty", + "pi": "#empty", + "rho": "#empty", + "sigmaf": "#empty", + "sigma": "#empty", + "tau": "#empty", + "upsilon": "#empty", + "phi": "#empty", + "chi": "#empty", + "psi": "#empty", + "omega": "#empty", + "thetasym": "#empty", + "upsih": "#empty", + "piv": "#empty", + "bull": "#empty", + "hellip": "#empty", + "prime": "#empty", + "Prime": "#empty", + "oline": "#empty", + "frasl": "#empty", + "weierp": "#empty", + "imaginary": "#empty", + "real": "#empty", + "trademark": "#empty", + "alefsym": "#empty", + "larr": "#empty", + "uarr": "#empty", + "rarr": "#empty", + "darr": "#empty", + "harr": "#empty", + "crarr": "#empty", + "lArr": "#empty", + "uArr": "#empty", + "rArr": "#empty", + "dArr": "#empty", + "hArr": "#empty", + "forall": "#empty", + "part": "#empty", + "exist": "#empty", + "empty": "#empty", + "nabla": "#empty", + "isin": "#empty", + "notin": "#empty", + "ni": "#empty", + "prod": "#empty", + "sum": "#empty", + "minus": "#empty", + "lowast": "#empty", + "radic": "#empty", + "prop": "#empty", + "infin": "#empty", + "ang": "#empty", + "and": "#empty", + "or": "#empty", + "cap": "#empty", + "cup": "#empty", + "int": "#empty", + "there4": "#empty", + "sim": "#empty", + "cong": "#empty", + "asymp": "#empty", + "ne": "#empty", + "equiv": "#empty", + "le": "#empty", + "ge": "#empty", + "sub": "#empty", + "sup": "#empty", + "nsub": "#empty", + "sube": "#empty", + "supe": "#empty", + "oplus": "#empty", + "otimes": "#empty", + "perp": "#empty", + "sdot": "#empty", + "lceil": "#empty", + "rceil": "#empty", + "lfloor": "#empty", + "rfloor": "#empty", + "lang": "#empty", + "rang": "#empty", + "loz": "#empty", + "spades": "#empty", + "clubs": "#empty", + "hearts": "#empty", + "diams": "#empty", + "OElig": "#empty", + "oelig": "#empty", + "Scaron": "#empty", + "scaron": "#empty", + "Yumlaut": "#empty", + "circ": "#empty", + "tilde": "#empty", + "ensp": "#empty", + "emsp": "#empty", + "thinsp": "#empty", + "zwnj": "#empty", + "zwj": "#empty", + "lrm": "#empty", + "rlm": "#empty", + "ndash": "#empty", + "mdash": "#empty", + "lsquo": "#empty", + "rsquo": "#empty", + "sbquo": "#empty", + "ldquo": "#empty", + "rdquo": "#empty", + "bdquo": "#empty", + "dagger": "#empty", + "Dagger": "#empty", + "permil": "#empty", + "lsaquo": "#empty", + "rsaquo": "#empty", + "euro": "#empty", + "tm": "#empty" + } + }, + "docCmdGroup": { + "kind": "union_list_element","allow_text": true, + "bases": ["docTitleCmdGroup"], + "content": { + "hruler": "#empty", + 
"preformatted": "docMarkupType", + "programlisting": "listingType", + "verbatim": "#string", + "javadocliteral": "#string", + "javadoccode": "#string", + "indexentry": "docIndexEntryType", + "orderedlist": "docListType", + "itemizedlist": "docListType", + "simplesect": "docSimpleSectType", + "title": "docTitleType", + "variablelist": "docVariableListType", + "table": "docTableType", + "heading": "docHeadingType", + "dotfile": "docImageFileType", + "mscfile": "docImageFileType", + "diafile": "docImageFileType", + "toclist": "docTocListType", + "language": "docLanguageType", + "parameterlist": "docParamListType", + "xrefsect": "docXRefSectType", + "copydoc": "docCopyType", + "details": "docDetailsType", + "blockquote": "docBlockQuoteType", + "parblock": "docParBlockType" + } + }, + "docParaType": { + "kind": "union_list_element", + "bases": ["docCmdGroup"] + }, + "docMarkupType": { + "kind": "union_list_element", + "bases": ["docCmdGroup"] + }, + "docTitleType": { + "kind": "union_list_element", + "bases": ["docTitleCmdGroup"] + }, + "docSummaryType": { + "kind": "union_list_element", + "bases": ["docTitleCmdGroup"] + }, + "docURLLink": { + "kind": "union_list_element", + "bases": ["docTitleCmdGroup"], + "attributes": { + "url": {"type": "#string"} + } + }, + "docHtmlOnlyType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "block": {"type": "#string"} + } + }, + "docImageType": { + "kind": "union_list_element", + "bases": ["docTitleCmdGroup"], + "attributes": { + "type": {"type": "DoxImageKind", "optional": true}, + "name": {"type": "#string", "optional": true}, + "width": {"type": "#string", "optional": true}, + "height": {"type": "#string", "optional": true}, + "alt": {"type": "#string", "optional": true}, + "inline": {"type": "#DoxBool", "optional": true}, + "caption": {"type": "#string", "optional": true} + } + }, + "docDotMscType": { + "kind": "union_list_element", + "bases": ["docTitleCmdGroup"], + "attributes": { + "name": {"type": "#string", "optional": true}, + "width": {"type": "#string", "optional": true}, + "height": {"type": "#string", "optional": true}, + "caption": {"type": "#string", "optional": true} + } + }, + "docPlantumlType": { + "kind": "union_list_element", + "bases": ["docTitleCmdGroup"], + "attributes": { + "name": {"type": "#string", "optional": true}, + "width": {"type": "#string", "optional": true}, + "height": {"type": "#string", "optional": true}, + "caption": {"type": "#string", "optional": true}, + "engine": {"type": "DoxPlantumlEngine", "optional": true} + } + }, + "docRefTextType": { + "kind": "union_list_element", + "bases": ["docTitleCmdGroup"], + "attributes": { + "refid": {"type": "#string"}, + "kindref": {"type": "#string"}, + "external": {"type": "#string"} + } + }, + "docHeadingType": { + "kind": "union_list_element", + "bases": ["docTitleCmdGroup"], + "attributes": { + "level": {"type": "#integer"} + } + }, + "docImageFileType": { + "kind": "union_list_element", + "bases": ["docTitleCmdGroup"], + "attributes": { + "width": {"type": "#string", "optional": true}, + "height": {"type": "#string", "optional": true} + } + }, + "docAnchorType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "id": {"type": "#string"} + } + }, + "docFormulaType": { + "kind": "union_list_element", + "allow_text": true, + "attributes": { + "id": {"type": "#string"} + } + }, + "docEmojiType": { + "kind": "tag_only_element", + "attributes": { + "name": {"type": "#string"}, + "unicode": {"type": "#string"} + } + }, + 
"docIndexEntryType": { + "kind": "tag_only_element", + "children": { + "primaryie": {"type": "#string"}, + "secondaryie": {"type": "#string"} + } + }, + "docListType": { + "kind": "list_element", + "min_items": 1, + "attributes": { + "type": {"type": "DoxOlType"}, + "start": {"type": "#integer"} + }, + "content": { + "listitem": "docListItemType" + } + }, + "docSimpleSectType": { + "kind": "tag_only_element", + "attributes": { + "type": {"type": "DoxSimpleSectKind"} + }, + "children": { + "title": {"type": "docTitleType", "min_items": 0}, + "para": {"type": "docParaType", "is_list": true} + } + }, + "docVariableListType": { + "kind": "tuple_list_element", + "min_items": 1, + "content": { + "varlistentry": "docVarListEntryType", + "listitem": "docListItemType" + } + }, + "docTableType" : { + "kind": "tag_only_element", + "attributes": { + "rows": {"type": "#integer"}, + "cols": {"type": "#integer"}, + "width": {"type": "#string"} + }, + "children": { + "caption": {"type": "docCaptionType", "min_items": 0}, + "row": {"type": "docRowType", "is_list": true, "min_items": 0} + } + }, + "docTocListType": { + "kind": "list_element", + "content": { + "tocitem": "docTocItemType" + } + }, + "docLanguageType": { + "kind": "list_element", + "attributes": { + "langid": {"type": "#string"} + }, + "content": { + "para": "docParaType" + } + }, + "docParamListType": { + "kind": "list_element", + "attributes": { + "kind": {"type": "DoxParamListKind"} + }, + "content": { + "parameteritem": "docParamListItem" + } + }, + "docXRefSectType": { + "kind": "tag_only_element", + "attributes": { + "id": {"type": "#string"} + }, + "children": { + "xreftitle": {"type": "#string", "is_list": true, "min_items": 0}, + "xrefdescription": {"type": "descriptionType"} + } + }, + "docCopyType": { + "kind": "tag_only_element", + "attributes": { + "link": {"type": "#string"} + }, + "children": { + "para": {"type": "docParaType", "is_list": true, "min_items": 0}, + "sec1": {"type": "docSect1Type", "is_list": true, "min_items": 0}, + "internal": {"type": "docInternalType", "min_items": 0} + } + }, + "docDetailsType": { + "kind": "tag_only_element", + "children": { + "summary": {"type": "docSummaryType", "min_items": 0}, + "para": {"type": "docParaType", "is_list": true, "min_items": 0} + } + }, + "docBlockQuoteType": { + "kind": "list_element", + "content": { + "para": "docParaType" + } + }, + "docParBlockType": { + "kind": "list_element", + "content": { + "para": "docParaType" + } + }, + "docVarListEntryType": { + "kind": "tag_only_element", + "children": { + "term": {"type": "docTitleType"} + } + }, + "DoxCompoundKind": { + "kind": "enumeration", + "values": [ + {"xml": "class", "id": "class_"}, + "struct", + "union", + "interface", + "protocol", + "category", + "exception", + "service", + "singleton", + "module", + "type", + "file", + "namespace", + "group", + "page", + "example", + "dir", + "concept" + ] + }, + "CompoundKind": { + "kind": "enumeration", + "values": [ + {"xml": "class", "id": "class_"}, + "struct", + "union", + "interface", + "protocol", + "category", + "exception", + "module", + "type", + "file", + "namespace", + "group", + "page", + "example", + "dir", + "concept" + ] + }, + "DoxLanguage": { + "kind": "enumeration", + "values": [ + "Unknown", + "IDL", + "Java", + {"xml": "C#", "id": "CSharp"}, + "D", + "PHP", + {"xml": "Objective-C", "id": "Objective_C"}, + {"xml": "C++", "id": "CPlusPlus"}, + "JavaScript", + "Python", + "Fortran", + "VHDL", + "XML", + "SQL", + "Markdown", + "Slice", + "Lex" + ] + }, + 
"DoxProtectionKind": { + "kind": "enumeration", + "values": [ + "public", + "protected", + "private", + "package" + ] + }, + "DoxRefQualifierKind": { + "kind": "enumeration", + "values": [ + "lvalue", + "rvalue" + ] + }, + "DoxVirtualKind": { + "kind": "enumeration", + "values": [ + {"xml": "non-virtual", "id": "non_virtual"}, + "virtual", + {"xml": "pure-virtual", "id": "pure_virtual"} + ] + }, + "DoxSectionKind": { + "kind": "enumeration", + "values": [ + {"xml": "user-defined", "id": "user_defined"}, + {"xml": "public-type", "id": "public_type"}, + {"xml": "public-func", "id": "public_func"}, + {"xml": "public-attrib", "id": "public_attrib"}, + {"xml": "public-slot", "id": "public_slot"}, + "signal", + {"xml": "dcop-func", "id": "dcop_func"}, + "property", + "event", + {"xml": "public-static-func", "id": "public_static_func"}, + {"xml": "public-static-attrib", "id": "public_static_attrib"}, + {"xml": "protected-type", "id": "protected_type"}, + {"xml": "protected-func", "id": "protected_func"}, + {"xml": "protected-attrib", "id": "protected_attrib"}, + {"xml": "protected-slot", "id": "protected_slot"}, + {"xml": "protected-static-func", "id": "protected_static_func"}, + {"xml": "protected-static-attrib", "id": "protected_static_attrib"}, + {"xml": "package-type", "id": "package_type"}, + {"xml": "package-func", "id": "package_func"}, + {"xml": "package-attrib", "id": "package_attrib"}, + {"xml": "package-static-func", "id": "package_static_func"}, + {"xml": "package-static-attrib", "id": "package_static_attrib"}, + {"xml": "private-type", "id": "private_type"}, + {"xml": "private-func", "id": "private_func"}, + {"xml": "private-attrib", "id": "private_attrib"}, + {"xml": "private-slot", "id": "private_slot"}, + {"xml": "private-static-func", "id": "private_static_func"}, + {"xml": "private-static-attrib", "id": "private_static_attrib"}, + "friend", + "related", + "define", + "prototype", + "typedef", + "enum", + "func", + "var" + ] + }, + "DoxHighlightClass": { + "kind": "enumeration", + "values": [ + "comment", + "normal", + "preprocessor", + "keyword", + "keywordtype", + "keywordflow", + "stringliteral", + "xmlcdata", + "charliteral", + "vhdlkeyword", + "vhdllogic", + "vhdlchar", + "vhdldigit" + ] + }, + "DoxSimpleSectKind": { + "kind": "enumeration", + "values": [ + "see", + {"xml": "return", "id": "return_"}, + "author", + "authors", + "version", + "since", + "date", + "note", + "warning", + "pre", + "post", + "copyright", + "invariant", + "remark", + "attention", + "par", + "rcs" + ] + }, + "DoxImageKind": { + "kind": "enumeration", + "values": [ + "html", + "latex", + "docbook", + "rtf", + "xml" + ] + }, + "DoxPlantumlEngine": { + "kind": "enumeration", + "values": [ + "uml", + "bpm", + "wire", + "dot", + "ditaa", + "salt", + "math", + "latex", + "gantt", + "mindmap", + "wbs", + "yaml", + "creole", + "json", + "flow", + "board", + "git", + "hcl", + "regex", + "ebnf" + ] + }, + "DoxParamListKind": { + "kind": "enumeration", + "values": [ + "param", + "retval", + "exception", + "templateparam" + ] + }, + "DoxParamDir": { + "kind": "enumeration", + "values": [ + {"xml": "in", "id": "in_"}, + "out", + "inout" + ] + }, + "DoxAccessor": { + "kind": "enumeration", + "values": [ + "retain", + "copy", + "assign", + "weak", + "strong", + "unretained" + ] + }, + "DoxAlign": { + "kind": "enumeration", + "values": [ + "left", + "right", + "center" + ] + }, + "DoxVerticalAlign": { + "kind": "enumeration", + "values": [ + "bottom", + "top", + "middle" + ] + }, + "DoxGraphRelation": { + "kind": 
"enumeration", + "values": [ + "include", + "usage", + {"xml": "template-instance", "id": "template_instance"}, + {"xml": "public-inheritance", "id": "public_inheritance"}, + {"xml": "protected-inheritance", "id": "protected_inheritance"}, + {"xml": "private-inheritance", "id": "private_inheritance"}, + {"xml": "type-constraint", "id": "type_constraint"} + ] + }, + "DoxOlType": { + "kind": "char_enumeration", + "values": "1aAiI" + } + } +} diff --git a/xml_parser_generator/stubs_template.pyi b/xml_parser_generator/stubs_template.pyi new file mode 100644 index 00000000..a67a36f5 --- /dev/null +++ b/xml_parser_generator/stubs_template.pyi @@ -0,0 +1,124 @@ +import enum +from typing import Generic,Literal,overload,Protocol,SupportsIndex,TypeVar +from collections.abc import Iterable + +T = TypeVar('T') +U = TypeVar('U') + +class SupportsRead(Protocol): + def read(self, length: int, /) -> bytes | bytearray: ... + +class FrozenList(Generic[T]): + def __init__(self, items: Iterable[T]): ... + def __len__(self) -> int: ... + def __getitem__(self, i: SupportsIndex) -> T: ... + +class TaggedValue(Generic[T, U]): + name: T + value: U + + def __init__(self, name: T, value: U): ... + + def __len__(self) -> Literal[2]: ... + + @overload + def __getitem__(self, i: Literal[0]) -> T: ... + + @overload + def __getitem__(self, i: Literal[1]) -> U: ... + + @overload + def __getitem__(self, i: SupportsIndex) -> T | U: ... + +class ParseError(RuntimeError): + pass + +class ParseWarning(UserWarning): + pass + +TopLevel = ( +//% for name,type in root_elements + {$ '| ' if not loop.first $}TaggedValue[Literal['{$ name $}'],{$ type.py_name $}] +//% endfor +) + +def parse_str(data: str, /) -> TopLevel: ... + +def parse_file(file: SupportsRead, /) -> TopLevel: ... + + +//% macro emit_fields(type) +{%- for b in type.bases %}{$ emit_fields(b) $}{% endfor -%} +//% for ref in type|attributes + {$ ref.py_name $}: {$ ref.py_type() $} +//% endfor +//% for ref in type|children + {$ ref.py_name $}: {$ ref.py_type() $} +//% endfor +//% endmacro + +//% macro emit_content_fields(type) +{%- for b in type.bases %}{$ emit_content_fields(b) $}{% endfor -%} +//% for cname,ctype in type|content + {$ cname $}: {$ ctype.py_name $} +//% endfor +//% endmacro + +//% for type in types +//% if type is content_tuple +//% set list_item_type = 'ListItem_'~type +class ListItem_{$ type $}: +{$ emit_content_fields(type) $} + def __init__(self{% for cname,ctype in type|content %}, {$ cname $}: {$ ctype.py_name $}{% endfor %}): ... + + def __len__(self) -> Literal[{$ type|content|length $}]: ... +//% for cname,ctype in type|content + @overload + def __getitem__(self,i: Literal[{$ loop.index0 $}]) -> {$ ctype.py_name $}: ... +//% endfor + @overload + def __getitem__(self,i: SupportsIndex) -> {$ type|content|map('last')|map(attribute='py_name')|join(' | ') $}: ... 
+ +//% elif type is content_union +//% set members = type.py_union_list()|sort +//% if members|length > 1 +//% set list_item_type = 'ListItem_'~type +ListItem_{$ type $} = ( +//% for m in members + {$ '| ' if not loop.first $}{$ m $} +//% endfor +) + +//% else +//% set list_item_type = members|first +//% endif +//% elif type is content_bare +//% set list_item_type = (type|content|first)[1].py_name +//% elif type is list_e +{$ "invalid content type"|error $} +//% endif +//% if type is used_directly +class Node_{$ type $}: +{$ emit_fields(type) $} + def __init__(self{$ ', __items: Iterable['~list_item_type~'], /' if type is list_e $} + {%- if type|field_count -%}, * + {%- for f in type.all_fields() if f is not optional %}, {$ f.py_name $}: {$ f.py_type(true) $}{% endfor -%} + {%- for f in type.all_fields() if f is optional %}, {$ f.py_name $}: {$ f.py_type(true) $} = ...{% endfor -%} + {%- endif %}): ... + +//% if type is list_e + def __len__(self) -> int: ... + + def __getitem__(self,i: SupportsIndex) -> {$ list_item_type $}: ... + +//% endif +//% elif type is enumeration_t +class {$ type $}(enum.Enum): +//% for entry in type.children + {$ entry.id $} = '{$ entry.xml $}' +//% endfor + +//% elif type is char_enum_t +{$ type $} = Literal[{% for c in type.values %}{$ "'"~c~"'" $}{$ ',' if not loop.last $}{% endfor %}] +//% endif +//% endfor From 3d05263b7223b33dffed5206c2218427e3c08528 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Fri, 17 Nov 2023 14:01:38 -0500 Subject: [PATCH 18/65] Made progress on new parser integration --- breathe/__init__.py | 8 +- breathe/directives/__init__.py | 27 +- breathe/finder/__init__.py | 5 +- breathe/renderer/sphinxrenderer.py | 509 +++++++++++------------- tests/data/arange.xml | 11 + tests/data/ellipsis.xml | 7 +- tests/test_renderer.py | 205 +++++----- tests/test_utils.py | 19 +- xml_parser_generator/module_template.c | 153 +++++-- xml_parser_generator/stubs_template.pyi | 13 +- 10 files changed, 514 insertions(+), 443 deletions(-) diff --git a/breathe/__init__.py b/breathe/__init__.py index b0282794..feefadfc 100644 --- a/breathe/__init__.py +++ b/breathe/__init__.py @@ -1,7 +1,3 @@ -from breathe.directives.setup import setup as directive_setup -from breathe.file_state_cache import setup as file_state_cache_setup -from breathe.renderer.sphinxrenderer import setup as renderer_setup - from sphinx.application import Sphinx # Keep in sync with setup.py __version__ @@ -9,6 +5,10 @@ def setup(app: Sphinx): + from breathe.directives.setup import setup as directive_setup + from breathe.file_state_cache import setup as file_state_cache_setup + from breathe.renderer.sphinxrenderer import setup as renderer_setup + directive_setup(app) file_state_cache_setup(app) renderer_setup(app) diff --git a/breathe/directives/__init__.py b/breathe/directives/__init__.py index 9988d610..18a0f7f3 100644 --- a/breathe/directives/__init__.py +++ b/breathe/directives/__init__.py @@ -1,22 +1,27 @@ +from __future__ import annotations + from breathe.finder.factory import FinderFactory -from breathe.parser import DoxygenParserFactory from breathe.parser import FileIOError, ParserError -from breathe.project import ProjectInfoFactory, ProjectInfo from breathe.renderer import format_parser_error, RenderContext -from breathe.renderer.filter import Filter, FilterFactory -from breathe.renderer.mask import MaskFactoryBase +from breathe.renderer.filter import FilterFactory from breathe.renderer.sphinxrenderer import SphinxRenderer -from breathe.renderer.target import TargetHandler from 
sphinx.directives import SphinxDirective from docutils import nodes -from typing import Any, Dict, List, Optional, Sequence +from typing import Any, TYPE_CHECKING, Sequence + +if TYPE_CHECKING: + from breathe.parser import DoxygenParserFactory + from breathe.project import ProjectInfoFactory, ProjectInfo + from breathe.renderer.filter import Filter + from breathe.renderer.mask import MaskFactoryBase + from breathe.renderer.target import TargetHandler class _WarningHandler: - def __init__(self, state, context: Dict[str, Any]) -> None: + def __init__(self, state, context: dict[str, Any]) -> None: self.state = state self.context = context @@ -24,9 +29,9 @@ def warn( self, raw_text: str, *, - rendered_nodes: Optional[Sequence[nodes.Node]] = None, + rendered_nodes: Sequence[nodes.Node] | None = None, unformatted_suffix: str = "" - ) -> List[nodes.Node]: + ) -> list[nodes.Node]: raw_text = self.format(raw_text) + unformatted_suffix if rendered_nodes is None: rendered_nodes = [nodes.paragraph("", "", nodes.Text(raw_text))] @@ -75,7 +80,7 @@ def filter_factory(self) -> FilterFactory: def kind(self) -> str: raise NotImplementedError - def create_warning(self, project_info: Optional[ProjectInfo], **kwargs) -> _WarningHandler: + def create_warning(self, project_info: ProjectInfo | None, **kwargs) -> _WarningHandler: if project_info: tail = 'in doxygen xml output for project "{project}" from directory: {path}'.format( project=project_info.name(), path=project_info.project_path() @@ -94,7 +99,7 @@ def render( target_handler: TargetHandler, mask_factory: MaskFactoryBase, directive_args, - ) -> List[nodes.Node]: + ) -> list[nodes.Node]: "Standard render process used by subclasses" try: diff --git a/breathe/finder/__init__.py b/breathe/finder/__init__.py index 65bacc21..7cd34aea 100644 --- a/breathe/finder/__init__.py +++ b/breathe/finder/__init__.py @@ -1,12 +1,11 @@ from __future__ import annotations -from breathe.project import ProjectInfo -from breathe.renderer.filter import Filter - from typing import TYPE_CHECKING if TYPE_CHECKING: + from breathe.project import ProjectInfo from breathe.finder.factory import DoxygenItemFinderFactory + from breathe.renderer.filter import Filter def stack(element, list_): """Stack an element on to the start of a list and return as a new list""" diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index 39c118c0..d0956ce7 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -1,22 +1,17 @@ +from __future__ import annotations + import os import sphinx from breathe import parser -from breathe.project import ProjectInfo -from breathe.renderer import RenderContext -from breathe.renderer.filter import Filter -from breathe.renderer.target import TargetHandler from sphinx import addnodes -from sphinx.application import Sphinx -from sphinx.directives import ObjectDescription from sphinx.domains import cpp, c, python from sphinx.util.nodes import nested_parse_with_titles from sphinx.util import url_re from sphinx.ext.graphviz import graphviz from docutils import nodes -from docutils.nodes import Node, TextElement from docutils.statemachine import StringList, UnexpectedIndentationError from docutils.parsers.rst.states import Text @@ -32,7 +27,18 @@ import re import textwrap -from typing import Any, Callable, cast, Dict, List, Optional, Type, Union +from typing import Any, Callable, Optional, Type, TYPE_CHECKING, Union +from collections.abc import Iterable + +if TYPE_CHECKING: + from breathe.project import 
ProjectInfo + from breathe.renderer import RenderContext + from breathe.renderer.filter import Filter + from breathe.renderer.target import TargetHandler + + from sphinx.application import Sphinx + from sphinx.directives import ObjectDescription + from docutils.nodes import Node, TextElement ContentCallback = Callable[[addnodes.desc_content], None] Declarator = Union[addnodes.desc_signature, addnodes.desc_signature_line] @@ -42,7 +48,7 @@ class WithContext: - def __init__(self, parent: "SphinxRenderer", context: RenderContext): + def __init__(self, parent: SphinxRenderer, context: RenderContext): self.context = context self.parent = parent self.previous = None @@ -304,8 +310,8 @@ class DomainDirectiveFactory: @staticmethod def create(domain: str, args) -> ObjectDescription: - cls = cast(Type[ObjectDescription], None) - name = cast(str, None) + cls: Type[ObjectDescription] + name: str if domain == "c": cls, name = DomainDirectiveFactory.c_classes[args[0]] elif domain == "py": @@ -376,19 +382,19 @@ def intersperse(iterable, delimiter): yield x -def get_param_decl(param): - def to_string(node): +def get_param_decl(param: parser.Node_paramType) -> str: + def to_string(node: parser.Node_linkedTextType | None) -> str: """Convert Doxygen node content to a string.""" - result = [] + result: list[str] = [] if node is not None: - for p in node.content_: - value = p.value - if not isinstance(value, str): - value = value.valueOf_ - result.append(value) + for p in node: + if isinstance(p, str): + result.append(p) + else: + result.append(p.value[0]) return " ".join(result) - param_type = to_string(param.type_) + param_type = to_string(param.type) param_name = param.declname if param.declname else param.defname if not param_name: param_decl = param_type @@ -570,12 +576,12 @@ def get_filename(node) -> Optional[str]: except AttributeError: return None - self.context = cast(RenderContext, self.context) + assert self.context is not None node_stack = self.context.node_stack node = node_stack[0] - # An enumvalue node doesn't have location, so use its parent node for detecting - # the domain instead. - if isinstance(node, str) or node.node_type == "enumvalue": + # An enumvalueType node doesn't have location, so use its parent node + # for detecting the domain instead. + if isinstance(node, (str,parser.Node_enumvalueType)): node = node_stack[1] filename = get_filename(node) if not filename and node.node_type == "compound": @@ -583,15 +589,15 @@ def get_filename(node) -> Optional[str]: filename = get_filename(file_data.compounddef) return self.project_info.domain_for_file(filename) if filename else "" - def join_nested_name(self, names: List[str]) -> str: + def join_nested_name(self, names: list[str]) -> str: dom = self.get_domain() sep = "::" if not dom or dom == "cpp" else "." 
return sep.join(names) def run_directive( self, obj_type: str, declaration: str, contentCallback: ContentCallback, options={} - ) -> List[Node]: - self.context = cast(RenderContext, self.context) + ) -> list[Node]: + assert self.context is not None args = [obj_type, [declaration]] + self.context.directive_args[2:] directive = DomainDirectiveFactory.create(self.context.domain, args) assert issubclass(type(directive), BaseObject) @@ -654,7 +660,7 @@ def handle_declaration( display_obj_type: Optional[str] = None, declarator_callback: Optional[DeclaratorCallback] = None, options={}, - ) -> List[Node]: + ) -> list[Node]: if obj_type is None: obj_type = node.kind if content_callback is None: @@ -721,7 +727,7 @@ def content(contentnode): declarator_callback(declarator) return nodes_ - def get_qualification(self) -> List[str]: + def get_qualification(self) -> list[str]: if self.nesting_level > 0: return [] @@ -738,7 +744,7 @@ def debug_print_node(n): ) _debug_indent += 1 - names: List[str] = [] + names: list[str] = [] for node in self.qualification_stack[1:]: if config.breathe_debug_trace_qualification: print("{}{}".format(_debug_indent * " ", debug_print_node(node))) @@ -857,7 +863,7 @@ def create_doxygen_target(self, node): refid = self.get_refid(node.id) return self.target_handler.create_target(refid) - def title(self, node) -> List[Node]: + def title(self, node) -> list[Node]: nodes_ = [] # Variable type or function return type @@ -867,16 +873,16 @@ def title(self, node) -> List[Node]: nodes_.append(addnodes.desc_name(text=node.name)) return nodes_ - def description(self, node) -> List[Node]: + def description(self, node) -> list[Node]: brief = self.render_optional(node.briefdescription) detailed = self.detaileddescription(node) return brief + detailed - def detaileddescription(self, node) -> List[Node]: + def detaileddescription(self, node) -> list[Node]: detailedCand = self.render_optional(node.detaileddescription) # all field_lists must be at the top-level of the desc_content, so pull them up - fieldLists: List[nodes.field_list] = [] - admonitions: List[Node] = [] + fieldLists: list[nodes.field_list] = [] + admonitions: list[Node] = [] def pullup(node, typ, dest): for n in node.traverse(typ): @@ -911,8 +917,8 @@ def pullup(node, typ, dest): # collapse retvals into a single return field if len(fieldLists) != 0 and sphinx.version_info[0:2] < (4, 3): - others: List[nodes.field] = [] - retvals: List[nodes.field] = [] + others: list[nodes.field] = [] + retvals: list[nodes.field] = [] f: nodes.field fn: nodes.field_name fb: nodes.field_body @@ -924,12 +930,12 @@ def pullup(node, typ, dest): else: others.append(f) if len(retvals) != 0: - items: List[nodes.paragraph] = [] + items: list[nodes.paragraph] = [] for fn, fb in retvals: # we created the retvals before, so we made this prefix assert fn.astext().startswith("returns ") val = nodes.strong("", fn.astext()[8:]) - # assumption from visit_docparamlist: fb is a single paragraph or nothing + # assumption from visit_Node_docParamListType: fb is a single paragraph or nothing assert len(fb) <= 1, fb bodyNodes = [val, nodes.Text(" -- ")] if len(fb) == 1: @@ -996,23 +1002,24 @@ def render_declaration(self, node, declaration=None, description=None, **kwargs) contentnode.extend(description) return nodes - def visit_doxygen(self, node) -> List[Node]: - nodelist: List[Node] = [] + def visit_Node_DoxygenTypeIndex(self, node: parser.Node_DoxygenTypeIndex) -> list[Node]: + nodelist: list[Node] = [] # Process all the compound children - for n in 
node.get_compound(): + for n in node.compound: nodelist.extend(self.render(n)) return nodelist - def visit_doxygendef(self, node) -> List[Node]: - return self.render(node.compounddef) + def visit_Node_DoxygenType(self, node: parser.Node_DoxygenType) -> list[Node]: + assert len(node.compounddef) == 1 + return self.render(node.compounddef[0]) - def visit_union(self, node) -> List[Node]: + def visit_union(self, node) -> list[Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) nodeDef = file_data.compounddef - self.context = cast(RenderContext, self.context) + assert self.context is not None parent_context = self.context.create_child_context(file_data) new_context = parent_context.create_child_context(nodeDef) @@ -1036,12 +1043,12 @@ def content(contentnode): nodes = self.handle_declaration(nodeDef, declaration, content_callback=content) return nodes - def visit_class(self, node) -> List[Node]: + def visit_class(self, node) -> list[Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) nodeDef = file_data.compounddef - self.context = cast(RenderContext, self.context) + assert self.context is not None parent_context = self.context.create_child_context(file_data) new_context = parent_context.create_child_context(nodeDef) @@ -1102,12 +1109,12 @@ def content(contentnode) -> None: return nodes[1][1].children return nodes - def visit_namespace(self, node) -> List[Node]: + def visit_namespace(self, node) -> list[Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) nodeDef = file_data.compounddef - self.context = cast(RenderContext, self.context) + assert self.context is not None parent_context = self.context.create_child_context(file_data) new_context = parent_context.create_child_context(file_data.compounddef) @@ -1136,7 +1143,7 @@ def content(contentnode): ) return nodes - def visit_compound(self, node, render_empty_node=True, **kwargs) -> List[Node]: + def visit_compound(self, node, render_empty_node=True, **kwargs) -> list[Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) @@ -1157,7 +1164,7 @@ def get_node_info(file_data): if not dom or dom in ("c", "cpp", "py", "cs"): return self.visit_namespace(node) - self.context = cast(RenderContext, self.context) + assert self.context is not None parent_context = self.context.create_child_context(file_data) new_context = parent_context.create_child_context(file_data.compounddef) rendered_data = self.render(file_data, parent_context) @@ -1218,9 +1225,9 @@ def render_signature(file_data, doxygen_target, name, kind): contentnode.extend(rendered_data) return nodes - def visit_file(self, node) -> List[Node]: + def visit_file(self, node) -> list[Node]: def render_signature(file_data, doxygen_target, name, kind): - self.context = cast(RenderContext, self.context) + assert self.context is not None options = self.context.directive_args[2] if "content-only" in options: @@ -1260,47 +1267,53 @@ def render_signature(file_data, doxygen_target, name, kind): # If this list is edited, also change the sections option documentation for # the doxygen(auto)file directive in documentation/source/file.rst. 
sections = [ - ("user-defined", "User Defined"), - ("public-type", "Public Types"), - ("public-func", "Public Functions"), - ("public-attrib", "Public Members"), - ("public-slot", "Public Slots"), - ("signal", "Signals"), - ("dcop-func", "DCOP Function"), - ("property", "Properties"), - ("event", "Events"), - ("public-static-func", "Public Static Functions"), - ("public-static-attrib", "Public Static Attributes"), - ("protected-type", "Protected Types"), - ("protected-func", "Protected Functions"), - ("protected-attrib", "Protected Attributes"), - ("protected-slot", "Protected Slots"), - ("protected-static-func", "Protected Static Functions"), - ("protected-static-attrib", "Protected Static Attributes"), - ("package-type", "Package Types"), - ("package-func", "Package Functions"), - ("package-attrib", "Package Attributes"), - ("package-static-func", "Package Static Functions"), - ("package-static-attrib", "Package Static Attributes"), - ("private-type", "Private Types"), - ("private-func", "Private Functions"), - ("private-attrib", "Private Members"), - ("private-slot", "Private Slots"), - ("private-static-func", "Private Static Functions"), - ("private-static-attrib", "Private Static Attributes"), - ("friend", "Friends"), - ("related", "Related"), - ("define", "Defines"), - ("prototype", "Prototypes"), - ("typedef", "Typedefs"), - ("concept", "Concepts"), - ("enum", "Enums"), - ("func", "Functions"), - ("var", "Variables"), + (parser.DoxSectionKind.user_defined, "User Defined"), + (parser.DoxSectionKind.public_type, "Public Types"), + (parser.DoxSectionKind.public_func, "Public Functions"), + (parser.DoxSectionKind.public_attrib, "Public Members"), + (parser.DoxSectionKind.public_slot, "Public Slots"), + (parser.DoxSectionKind.signal, "Signals"), + (parser.DoxSectionKind.dcop_func, "DCOP Function"), + (parser.DoxSectionKind.property, "Properties"), + (parser.DoxSectionKind.event, "Events"), + (parser.DoxSectionKind.public_static_func, "Public Static Functions"), + (parser.DoxSectionKind.public_static_attrib, "Public Static Attributes"), + (parser.DoxSectionKind.protected_type, "Protected Types"), + (parser.DoxSectionKind.protected_func, "Protected Functions"), + (parser.DoxSectionKind.protected_attrib, "Protected Attributes"), + (parser.DoxSectionKind.protected_slot, "Protected Slots"), + (parser.DoxSectionKind.protected_static_func, "Protected Static Functions"), + (parser.DoxSectionKind.protected_static_attrib, "Protected Static Attributes"), + (parser.DoxSectionKind.package_type, "Package Types"), + (parser.DoxSectionKind.package_func, "Package Functions"), + (parser.DoxSectionKind.package_attrib, "Package Attributes"), + (parser.DoxSectionKind.package_static_func, "Package Static Functions"), + (parser.DoxSectionKind.package_static_attrib, "Package Static Attributes"), + (parser.DoxSectionKind.private_type, "Private Types"), + (parser.DoxSectionKind.private_func, "Private Functions"), + (parser.DoxSectionKind.private_attrib, "Private Members"), + (parser.DoxSectionKind.private_slot, "Private Slots"), + (parser.DoxSectionKind.private_static_func, "Private Static Functions"), + (parser.DoxSectionKind.private_static_attrib, "Private Static Attributes"), + (parser.DoxSectionKind.friend, "Friends"), + (parser.DoxSectionKind.related, "Related"), + (parser.DoxSectionKind.define, "Defines"), + (parser.DoxSectionKind.prototype, "Prototypes"), + (parser.DoxSectionKind.typedef, "Typedefs"), + #(parser.DoxSectionKind.concept, "Concepts"), + (parser.DoxSectionKind.enum, "Enums"), + 
(parser.DoxSectionKind.func, "Functions"), + (parser.DoxSectionKind.var, "Variables"), ] - def visit_compounddef(self, node) -> List[Node]: - self.context = cast(RenderContext, self.context) + def render_iterable(self, iterable: Iterable) -> list[Node]: + output: list[Node] = [] + for entry in iterable: + output.extend(self.render(entry)) + return output + + def visit_Node_compounddefType(self, node: parser.Node_compounddefType) -> list[Node]: + assert self.context is not None options = self.context.directive_args[2] section_order = None if "sections" in options: @@ -1308,7 +1321,7 @@ def visit_compounddef(self, node) -> List[Node]: membergroup_order = None if "membergroups" in options: membergroup_order = {sec: i for i, sec in enumerate(options["membergroups"].split(" "))} - nodemap: Dict[int, List[Node]] = {} + nodemap: dict[int, list[Node]] = {} def addnode(kind, lam): if section_order is None: @@ -1318,14 +1331,14 @@ def addnode(kind, lam): if "members-only" not in options: if "allow-dot-graphs" in options: - addnode("incdepgraph", lambda: self.render_optional(node.get_incdepgraph())) - addnode("invincdepgraph", lambda: self.render_optional(node.get_invincdepgraph())) + addnode("incdepgraph", lambda: self.render_optional(node.incdepgraph)) + addnode("invincdepgraph", lambda: self.render_optional(node.invincdepgraph)) addnode( - "inheritancegraph", lambda: self.render_optional(node.get_inheritancegraph()) + "inheritancegraph", lambda: self.render_optional(node.inheritancegraph) ) addnode( "collaborationgraph", - lambda: self.render_optional(node.get_collaborationgraph()), + lambda: self.render_optional(node.collaborationgraph), ) addnode("briefdescription", lambda: self.render_optional(node.briefdescription)) @@ -1347,7 +1360,7 @@ def render_derivedcompoundref(node): "derivedcompoundref", lambda: render_derivedcompoundref(node.derivedcompoundref) ) - section_nodelists: Dict[str, List[Node]] = {} + section_nodelists: dict[str, list[Node]] = {} # Get all sub sections for sectiondef in node.sectiondef: @@ -1368,21 +1381,22 @@ def render_derivedcompoundref(node): # We store the nodes as a list against the kind in a dictionary as the kind can be # 'user-edited' and that can repeat so this allows us to collect all the 'user-edited' # entries together - section_nodelists.setdefault(kind, []).append(rst_node) + section_nodelists.setdefault(kind.value, []).append(rst_node) # Order the results in an appropriate manner for kind, _ in self.sections: - addnode(kind, lambda: section_nodelists.get(kind, [])) + addnode(kind, lambda: section_nodelists.get(kind.value, [])) # Take care of innerclasses addnode("innerclass", lambda: self.render_iterable(node.innerclass)) addnode("innernamespace", lambda: self.render_iterable(node.innernamespace)) if "inner" in options: - for node in node.innergroup: - file_data = self.compound_parser.parse(node.refid) - inner = file_data.compounddef - addnode("innergroup", lambda: self.visit_compounddef(inner)) + for cnode in node.innergroup: + file_data = self.compound_parser.parse(cnode.refid) + assert len(file_data.compounddef) == 1 + inner = file_data.compounddef[0] + addnode("innergroup", lambda: self.visit_Node_compounddefType(inner)) nodelist = [] for _, nodes_ in sorted(nodemap.items()): @@ -1392,8 +1406,8 @@ def render_derivedcompoundref(node): section_titles = dict(sections) - def visit_sectiondef(self, node) -> List[Node]: - self.context = cast(RenderContext, self.context) + def visit_Node_sectiondefType(self, node: parser.Node_sectiondefType) -> list[Node]: + 
assert self.context is not None options = self.context.directive_args[2] node_list = [] node_list.extend(self.render_optional(node.description)) @@ -1429,12 +1443,12 @@ def visit_sectiondef(self, node) -> List[Node]: classes=["breathe-sectiondef-title"], ids=["breathe-section-title-" + idtext], ) - res: List[Node] = [rubric] + res: list[Node] = [rubric] return res + node_list return [] - def visit_docreftext(self, node) -> List[Node]: - nodelist = self.render_iterable(node.content_) + def visit_Node_docRefTextType(self, node: parser.Node_docRefTextType|parser.Node_incType) -> list[Node]: + nodelist = self.render_iterable(node) if hasattr(node, "para"): nodelist.extend(self.render_iterable(node.para)) @@ -1453,16 +1467,16 @@ def visit_docreftext(self, node) -> List[Node]: ] return nodelist - def visit_docheading(self, node) -> List[Node]: + def visit_Node_docHeadingTyp(self, node: parser.Node_docHeadingType) -> list[Node]: """Heading renderer. Renders embedded headlines as emphasized text. Different heading levels are not supported. """ - nodelist = self.render_iterable(node.content_) + nodelist = self.render_iterable(node) return [nodes.emphasis("", "", *nodelist)] - def visit_docpara(self, node) -> List[Node]: + def visit_Node_docParaType(self, node: parser.Node_docParaType) -> list[Node]: """ tags in the Doxygen output tend to contain either text or a single other tag of interest. So whilst it looks like we're combined descriptions and program listings and @@ -1478,7 +1492,7 @@ def visit_docpara(self, node) -> List[Node]: contentNodeCands = self.render_iterable(node.content) # if there are consecutive nodes.Text we should collapse them # and rerender them to ensure the right paragraphifaction - contentNodes: List[Node] = [] + contentNodes: list[Node] = [] for n in contentNodeCands: if len(contentNodes) != 0 and isinstance(contentNodes[-1], nodes.Text): if isinstance(n, nodes.Text): @@ -1526,11 +1540,10 @@ def visit_docpara(self, node) -> List[Node]: return [nodes.paragraph("", "", *nodelist)] - def visit_docparblock(self, node) -> List[Node]: - return self.render_iterable(node.para) + visit_Node_docParBlockType = render_iterable - def visit_docblockquote(self, node) -> List[Node]: - nodelist = self.render_iterable(node.para) + def visit_Node_docBlockQuoteType(self, node: parser.Node_docBlockQuoteType) -> list[Node]: + nodelist = self.render_iterable(node) # catch block quote attributions here; the tag is the only identifier, # and it is nested within a subsequent tag if nodelist and nodelist[-1].astext().startswith("—"): @@ -1540,24 +1553,26 @@ def visit_docblockquote(self, node) -> List[Node]: nodelist[-1] = nodes.attribution("", text) return [nodes.block_quote("", classes=[], *nodelist)] - def visit_docimage(self, node) -> List[Node]: + def visit_Node_docImageType(self, node: parser.Node_docImageType) -> list[Node]: """Output docutils image node using name attribute from xml as the uri""" path_to_image = node.name - if not url_re.match(path_to_image): + if path_to_image is None: + path_to_image = '' + elif not url_re.match(path_to_image): path_to_image = self.project_info.sphinx_abs_path_to_file(path_to_image) options = {"uri": path_to_image} return [nodes.image("", **options)] - def visit_docurllink(self, node) -> List[Node]: + def visit_Node_docURLLink(self, node: parser.Node_docURLLink) -> list[Node]: """Url Link Renderer""" - nodelist = self.render_iterable(node.content_) + nodelist = self.render_iterable(node) return [nodes.reference("", "", refuri=node.url, *nodelist)] - def 
visit_docmarkup(self, node) -> List[Node]: - nodelist = self.render_iterable(node.content_) + def visit_Node_docMarkupType(self, node: parser.Node_docMarkupType) -> list[Node]: + nodelist = self.render_iterable(node) creator: Type[TextElement] = nodes.inline if node.type_ == "emphasis": creator = nodes.emphasis @@ -1575,7 +1590,7 @@ def visit_docmarkup(self, node) -> List[Node]: print("Warning: does not currently handle 'small' text display") return [creator("", "", *nodelist)] - def visit_docsectN(self, node) -> List[Node]: + def visit_Node_docSect1Type(self, node: parser.Node_docSect1Type | parser.Node_docSect2Type | parser.Node_docSect3Type) -> list[Node]: """ Docutils titles are defined by their level inside the document so the proper structure is only guaranteed by the Doxygen XML. @@ -1587,10 +1602,13 @@ def visit_docsectN(self, node) -> List[Node]: section["ids"].append(self.get_refid(node.id)) section += nodes.title(node.title, node.title) section += self.create_doxygen_target(node) - section += self.render_iterable(node.content_) + section += self.render_iterable(node) return [section] + + visit_Node_docSect2Type = visit_Node_docSect1Type + visit_Node_docSect3Type = visit_Node_docSect1Type - def visit_docsimplesect(self, node) -> List[Node]: + def visit_Node_docSimpleSectType(self, node: parser.Node_docSimpleSectType) -> list[Node]: """Other Type documentation such as Warning, Note, Returns, etc""" # for those that should go into a field list, just render them as that, @@ -1630,12 +1648,11 @@ def visit_docsimplesect(self, node) -> List[Node]: return [nodes.definition_list_item("", term, definition)] - def visit_doctitle(self, node) -> List[Node]: - return self.render_iterable(node.content_) + visit_Node_docTitleType = render_iterable - def visit_docformula(self, node) -> List[Node]: - nodelist: List[Node] = [] - for item in node.content_: + def visit_Node_docFormulaType(self, node: parser.Node_docFormulaType) -> list[Node]: + nodelist: list[Node] = [] + for item in node: latex = item.getValue() docname = self.state.document.settings.env.docname # Strip out the doxygen markup that slips through @@ -1657,8 +1674,8 @@ def visit_docformula(self, node) -> List[Node]: ) return nodelist - def visit_listing(self, node) -> List[Node]: - nodelist: List[Node] = [] + def visit_Node_listingType(self, node: parser.Node_listingType) -> list[Node]: + nodelist: list[Node] = [] for i, item in enumerate(node.codeline): # Put new lines between the lines if i: @@ -1672,11 +1689,9 @@ def visit_listing(self, node) -> List[Node]: block["language"] = node.domain return [block] - def visit_codeline(self, node) -> List[Node]: - return self.render_iterable(node.highlight) + visit_Node_codelineType = render_iterable - def visit_highlight(self, node) -> List[Node]: - return self.render_iterable(node.content_) + visit_Node_highlightType = render_iterable def _nested_inline_parse_with_titles(self, content, node) -> str: """ @@ -1702,7 +1717,7 @@ def _nested_inline_parse_with_titles(self, content, node) -> str: self.state.memo.title_styles = surrounding_title_styles self.state.memo.section_level = surrounding_section_level - def visit_verbatim(self, node) -> List[Node]: + def visit_verbatim(self, node) -> list[Node]: if not node.text.strip().startswith("embed:rst"): # Remove trailing new lines. 
Purely subjective call from viewing results text = node.text.rstrip() @@ -1765,13 +1780,13 @@ def visit_verbatim(self, node) -> List[Node]: return [rst_node] - def visit_inc(self, node: compoundsuper.incType) -> List[Node]: + def visit_Node_incType(self, node: parser.Node_incType) -> list[Node]: if not self.app.config.breathe_show_include: return [] - compound_link: List[Node] = [nodes.Text(node.content_[0].getValue())] - if node.get_refid(): - compound_link = self.visit_docreftext(node) + compound_link: list[Node] = [nodes.Text(''.join(node))] + if node.refid: + compound_link = self.visit_Node_docRefTextType(node) if node.local == "yes": text = [nodes.Text('#include "'), *compound_link, nodes.Text('"')] else: @@ -1779,28 +1794,28 @@ def visit_inc(self, node: compoundsuper.incType) -> List[Node]: return [nodes.container("", nodes.emphasis("", "", *text))] - def visit_ref(self, node: compoundsuper.refType) -> List[Node]: + def visit_Node_refType(self, node: parser.Node_refType) -> list[Node]: def get_node_info(file_data): - name = node.content_[0].getValue() + name = ''.join(node) name = name.rsplit("::", 1)[-1] return name, file_data.compounddef.kind return self.visit_compound(node, False, get_node_info=get_node_info) - def visit_doclistitem(self, node) -> List[Node]: + def visit_Node_docListItemType(self, node: parser.Node_docListItemType) -> list[Node]: """List item renderer. Render all the children depth-first. Upon return expand the children node list into a docutils list-item. """ - nodelist = self.render_iterable(node.para) + nodelist = self.render_iterable(node) return [nodes.list_item("", *nodelist)] numeral_kind = ["arabic", "loweralpha", "lowerroman", "upperalpha", "upperroman"] - def render_unordered(self, children) -> List[Node]: + def render_unordered(self, children) -> list[Node]: nodelist_list = nodes.bullet_list("", *children) return [nodelist_list] - def render_enumerated(self, children, nesting_level) -> List[Node]: + def render_enumerated(self, children, nesting_level) -> list[Node]: nodelist_list = nodes.enumerated_list("", *children) idx = nesting_level % len(SphinxRenderer.numeral_kind) nodelist_list["enumtype"] = SphinxRenderer.numeral_kind[idx] @@ -1808,7 +1823,7 @@ def render_enumerated(self, children, nesting_level) -> List[Node]: nodelist_list["suffix"] = "." 
return [nodelist_list] - def visit_doclist(self, node) -> List[Node]: + def visit_Node_docListType(self, node: parser.Node_docListType) -> list[Node]: """List renderer The specifics of the actual list rendering are handled by the @@ -1825,9 +1840,11 @@ def visit_doclist(self, node) -> List[Node]: return self.render_enumerated(children=val, nesting_level=self.nesting_level) return [] - def visit_compoundref(self, node) -> List[Node]: - nodelist = self.render_iterable(node.content_) - refid = self.get_refid(node.refid) + def visit_Node_compoundRefType(self, node: parser.Node_compoundRefType) -> list[Node]: + nodelist = self.render_iterable(node) + refid = None + if node.refid is not None: + refid = self.get_refid(node.refid) if refid is not None: nodelist = [ addnodes.pending_xref( @@ -1842,7 +1859,7 @@ def visit_compoundref(self, node) -> List[Node]: ] return nodelist - def visit_docxrefsect(self, node) -> List[Node]: + def visit_Node_docXRefSectType(self, node: parser.Node_docXRefSectType) -> list[Node]: assert self.app.env is not None signode = addnodes.desc_signature() @@ -1871,29 +1888,28 @@ def visit_docxrefsect(self, node) -> List[Node]: return [descnode] - def visit_docvariablelist(self, node) -> List[Node]: - output: List[Node] = [] - for varlistentry, listitem in zip(node.varlistentries, node.listitems): + def visit_Node_docVariableListType(self, node: parser.Node_docVariableListType) -> list[Node]: + output: list[Node] = [] + for n in node: descnode = addnodes.desc() descnode["objtype"] = "varentry" descnode["domain"] = self.get_domain() if self.get_domain() else "cpp" signode = addnodes.desc_signature() - signode += self.render_optional(varlistentry) + signode += self.render_optional(n.varlistentry) descnode += signode contentnode = addnodes.desc_content() - contentnode += self.render_iterable(listitem.para) + contentnode += self.render_iterable(n.listitem.para) descnode += contentnode output.append(descnode) return output - def visit_docvarlistentry(self, node) -> List[Node]: - content = node.term.content_ - return self.render_iterable(content) + def visit_Node_docVarListEntryType(self, node: parser.Node_docVarListEntryType) -> list[Node]: + return self.render_iterable(node.term) - def visit_docanchor(self, node) -> List[Node]: + def visit_Node_docAnchorType(self, node: parser.Node_docAnchorType) -> list[Node]: return list(self.create_doxygen_target(node)) - def visit_docentry(self, node) -> List[Node]: + def visit_Node_docEntryType(self, node: parser.Node_docEntryType) -> list[Node]: col = nodes.entry() col += self.render_iterable(node.para) if node.thead == "yes": @@ -1904,7 +1920,7 @@ def visit_docentry(self, node) -> List[Node]: col["morecols"] = int(node.colspan) - 1 return [col] - def visit_docrow(self, node) -> List[Node]: + def visit_Node_docRowType(self, node: parser.Node_docRowType) -> list[Node]: row = nodes.row() cols = self.render_iterable(node.entry) elem: Union[nodes.thead, nodes.tbody] @@ -1916,7 +1932,7 @@ def visit_docrow(self, node) -> List[Node]: elem.append(row) return [elem] - def visit_doctable(self, node) -> List[Node]: + def visit_Node_docTableType(self, node: parser.Node_docTableType) -> list[Node]: table = nodes.table() table["classes"] += ["colwidths-auto"] tgroup = nodes.tgroup(cols=node.cols) @@ -1927,11 +1943,11 @@ def visit_doctable(self, node) -> List[Node]: table += tgroup rows = self.render_iterable(node.row) - # this code depends on visit_docrow(), and expects the same elements used to + # this code depends on visit_Node_docRowType(), and 
expects the same elements used to # "envelop" rows there, namely thead and tbody (eg it will need to be updated # if Doxygen one day adds support for tfoot) - tags: Dict[str, List] = {row.starttag(): [] for row in rows} + tags: dict[str, list] = {row.starttag(): [] for row in rows} for row in rows: tags[row.starttag()].append(row.next_node()) @@ -1947,16 +1963,14 @@ def merge_row_types(root, elem, elems): return [table] - def visit_mixedcontainer(self, node: compoundsuper.MixedContainer) -> List[Node]: + def visit_mixedcontainer(self, node: compoundsuper.MixedContainer) -> list[Node]: return self.render_optional(node.getValue()) - def visit_description(self, node) -> List[Node]: - return self.render_iterable(node.content_) + visit_Node_descriptionType = render_iterable - def visit_linkedtext(self, node) -> List[Node]: - return self.render_iterable(node.content_) + visit_Node_linkedTextType = render_iterable - def visit_function(self, node) -> List[Node]: + def visit_function(self, node) -> list[Node]: dom = self.get_domain() if not dom or dom in ("c", "cpp", "py", "cs"): names = self.get_qualification() @@ -2006,7 +2020,7 @@ def visit_function(self, node) -> List[Node]: # Get full function signature for the domain directive. param_list = [] for param in node.param: - self.context = cast(RenderContext, self.context) + assert self.context is not None param = self.context.mask_factory.mask(param) param_decl = get_param_decl(param) param_list.append(param_decl) @@ -2033,7 +2047,7 @@ def visit_function(self, node) -> List[Node]: if node.virt == "pure-virtual": signature += "= 0" - self.context = cast(RenderContext, self.context) + assert self.context is not None self.context.directive_args[1] = [signature] nodes = self.run_domain_directive(node.kind, self.context.directive_args[1]) @@ -2061,7 +2075,7 @@ def visit_function(self, node) -> List[Node]: finder.content.extend(self.description(node)) return nodes - def visit_define(self, node) -> List[Node]: + def visit_define(self, node) -> list[Node]: declaration = node.name if node.param: declaration += "(" @@ -2079,7 +2093,7 @@ def add_definition(declarator: Declarator) -> None: return self.handle_declaration(node, declaration, declarator_callback=add_definition) - def visit_enum(self, node) -> List[Node]: + def visit_enum(self, node) -> list[Node]: def content(contentnode): contentnode.extend(self.description(node)) values = nodes.emphasis("", nodes.Text("Values:")) @@ -2107,14 +2121,14 @@ def content(contentnode): node, declaration, obj_type=obj_type, content_callback=content ) - def visit_enumvalue(self, node) -> List[Node]: + def visit_Node_enumvalueType(self, node: parser.Node_enumvalueType) -> list[Node]: if self.app.config.breathe_show_enumvalue_initializer: declaration = node.name + self.make_initializer(node) else: declaration = node.name return self.handle_declaration(node, declaration, obj_type="enumvalue") - def visit_typedef(self, node) -> List[Node]: + def visit_typedef(self, node) -> list[Node]: type_ = "".join(n.astext() for n in self.render(node.get_type())) names = self.get_qualification() names.append(node.get_name()) @@ -2135,7 +2149,7 @@ def visit_typedef(self, node) -> List[Node]: def make_initializer(self, node) -> str: initializer = node.initializer - signature: List[Node] = [] + signature: list[Node] = [] if initializer: render_nodes = self.render(initializer) # Do not append separators for paragraphs. 
@@ -2148,7 +2162,7 @@ def make_initializer(self, node) -> str: signature.extend(render_nodes) return "".join(n.astext() for n in signature) - def visit_variable(self, node) -> List[Node]: + def visit_variable(self, node) -> list[Node]: names = self.get_qualification() names.append(node.name) name = self.join_nested_name(names) @@ -2198,7 +2212,7 @@ def visit_variable(self, node) -> List[Node]: else: return self.render_declaration(node, declaration) - def visit_friendclass(self, node) -> List[Node]: + def visit_friendclass(self, node) -> list[Node]: dom = self.get_domain() assert not dom or dom == "cpp" @@ -2224,13 +2238,13 @@ def visit_friendclass(self, node) -> List[Node]: return [desc] def visit_templateparam( - self, node: compound.paramTypeSub, *, insertDeclNameByParsing: bool = False - ) -> List[Node]: - nodelist: List[Node] = [] + self, node: parser.Node_paramType, *, insertDeclNameByParsing: bool = False + ) -> list[Node]: + nodelist: list[Node] = [] # Parameter type - if node.type_: - type_nodes = self.render(node.type_) + if node.type: + type_nodes = self.render(node.type) # Render keywords as annotations for consistency with the cpp domain. if len(type_nodes) > 0 and isinstance(type_nodes[0], str): first_node = type_nodes[0] @@ -2296,8 +2310,8 @@ def visit_templateparam( return nodelist - def visit_templateparamlist(self, node: compound.templateparamlistTypeSub) -> List[Node]: - nodelist: List[Node] = [] + def visit_Node_templateparamlistType(self, node: parser.Node_templateparamlistType) -> list[Node]: + nodelist: list[Node] = [] self.output_defname = False for i, item in enumerate(node.param): if i: @@ -2306,7 +2320,7 @@ def visit_templateparamlist(self, node: compound.templateparamlistTypeSub) -> Li self.output_defname = True return nodelist - def visit_docparamlist(self, node) -> List[Node]: + def visit_Node_docParamListType(self, node: parser.Node_docParamListType) -> list[Node]: """Parameter/Exception/TemplateParameter documentation""" fieldListName = { @@ -2322,7 +2336,7 @@ def visit_docparamlist(self, node) -> List[Node]: for item in node.parameteritem: # TODO: does item.parameternamelist really have more than 1 parametername? 
assert len(item.parameternamelist) <= 1, item.parameternamelist - nameNodes: List[Node] = [] + nameNodes: list[Node] = [] parameterDirectionNodes = [] if len(item.parameternamelist) != 0: paramNameNodes = item.parameternamelist[0].parametername @@ -2371,7 +2385,7 @@ def visit_docparamlist(self, node) -> List[Node]: fieldList += field return [fieldList] - def visit_docdot(self, node) -> List[Node]: + def visit_docdot(self, node) -> list[Node]: """Translate node from doxygen's dot command to sphinx's graphviz directive.""" graph_node = graphviz() if node.content_ and node.content_[0].getValue().rstrip("\n"): @@ -2390,7 +2404,7 @@ def visit_docdot(self, node) -> List[Node]: return [nodes.figure("", graph_node, caption_node)] return [graph_node] - def visit_docdotfile(self, node) -> List[Node]: + def visit_docdotfile(self, node) -> list[Node]: """Translate node from doxygen's dotfile command to sphinx's graphviz directive.""" dotcode = "" dot_file_path = node.name # type: str @@ -2429,7 +2443,7 @@ def visit_docdotfile(self, node) -> List[Node]: return [nodes.figure("", graph_node, caption_node)] return [graph_node] - def visit_docgraph(self, node: compoundsuper.graphType) -> List[Node]: + def visit_Node_graphType(self, node: parser.Node_graphType) -> list[Node]: """Create a graph (generated by doxygen - not user-defined) from XML using dot syntax.""" # use graphs' legend from doxygen (v1.9.1) @@ -2452,10 +2466,10 @@ def visit_docgraph(self, node: compoundsuper.graphType) -> List[Node]: dot += " font=Helvetica padding=2]\n" dot += ' edge [color="#1414CE"]\n' relations = [] - for g_node in node.get_node(): - dot += ' "%s" [label="%s"' % (g_node.get_id(), g_node.get_label()) - dot += ' tooltip="%s"' % g_node.get_label() - if g_node.get_id() == "1": + for g_node in node.node: + dot += ' "%s" [label="%s"' % (g_node.id, g_node.label) + dot += ' tooltip="%s"' % g_node.label + if g_node.id == "1": # the disabled grey color is used in doxygen to indicate that the URL is # not set (for the compound in focus). Setting this here doesn't allow # further customization. Maybe remove this since URL is not used? @@ -2468,13 +2482,13 @@ def visit_docgraph(self, node: compoundsuper.graphType) -> List[Node]: # dot += ' URL="%s"' % g_node.get_link().get_refid() dot += "]\n" for child_node in g_node.childnode: - edge = f' "{g_node.get_id()}"' - edge += f' -> "{child_node.get_refid()}" [' - edge += f"dir={node.get_direction()} " + edge = f' "{g_node.id}"' + edge += f' -> "{child_node.refid}" [' + edge += f"dir={node.direction} " # edge labels don't appear in XML (bug?); use tooltip in meantime - edge += 'tooltip="%s"' % child_node.get_relation() - if child_node.get_relation() in edge_colors.keys(): - edge += ' color="#%s"' % edge_colors.get(child_node.get_relation()) + edge += 'tooltip="%s"' % child_node.relation.value + if child_node.relation.value in edge_colors.keys(): + edge += ' color="#%s"' % edge_colors.get(child_node.relation.value) edge += "]\n" relations.append(edge) for relation in relations: @@ -2486,24 +2500,24 @@ def visit_docgraph(self, node: compoundsuper.graphType) -> List[Node]: graph_node["code"] = dot graph_node["align"] = "center" graph_node["options"] = {} - caption = node.get_caption() + caption = node.caption # if caption is first node in a figure, then everything that follows is # considered a caption. Use a paragraph followed by a figure to center the # graph. This may have illegible side effects for very large graphs. 
caption_node = nodes.paragraph("", nodes.Text(caption)) return [caption_node, nodes.figure("", graph_node)] - def visit_unknown(self, node) -> List[Node]: + def visit_unknown(self, node) -> list[Node]: """Visit a node of unknown type.""" return [] - def dispatch_compound(self, node) -> List[Node]: + def visit_Node_CompoundType(self, node: parser.Node_CompoundType) -> list[Node]: """Dispatch handling of a compound node to a suitable visit method.""" - if node.kind in ["file", "dir", "page", "example", "group"]: + if node.kind in [parser.CompoundKind.file, parser.CompoundKind.dir, parser.CompoundKind.page, parser.CompoundKind.example, parser.CompoundKind.group]: return self.visit_file(node) return self.visit_compound(node) - def dispatch_memberdef(self, node) -> List[Node]: + def dispatch_Node_memberdefType(self, node: parser.Node_memberdefType) -> list[Node]: """Dispatch handling of a memberdef node to a suitable visit method.""" if node.kind in ("function", "signal", "slot") or ( node.kind == "friend" and node.argsstring @@ -2528,58 +2542,13 @@ def dispatch_memberdef(self, node) -> List[Node]: return self.visit_friendclass(node) return self.render_declaration(node, update_signature=self.update_signature) - # A mapping from node types to corresponding dispatch and visit methods. - # Dispatch methods, as the name suggest, dispatch nodes to appropriate visit - # methods based on node attributes such as kind. - methods: Dict[str, Callable[["SphinxRenderer", Any], List[Node]]] = { - "doxygen": visit_doxygen, - "doxygendef": visit_doxygendef, - "compound": dispatch_compound, - "compounddef": visit_compounddef, - "sectiondef": visit_sectiondef, - "memberdef": dispatch_memberdef, - "docreftext": visit_docreftext, - "docheading": visit_docheading, - "docpara": visit_docpara, - "docparblock": visit_docparblock, - "docimage": visit_docimage, - "docurllink": visit_docurllink, - "docmarkup": visit_docmarkup, - "docsect1": visit_docsectN, - "docsect2": visit_docsectN, - "docsect3": visit_docsectN, - "docsimplesect": visit_docsimplesect, - "doctitle": visit_doctitle, - "docformula": visit_docformula, - "listing": visit_listing, - "codeline": visit_codeline, - "highlight": visit_highlight, - "verbatim": visit_verbatim, - "inc": visit_inc, - "ref": visit_ref, - "doclist": visit_doclist, - "doclistitem": visit_doclistitem, - "enumvalue": visit_enumvalue, - "linkedtext": visit_linkedtext, - "compoundref": visit_compoundref, - "mixedcontainer": visit_mixedcontainer, - "description": visit_description, - "templateparamlist": visit_templateparamlist, - "docparamlist": visit_docparamlist, - "docxrefsect": visit_docxrefsect, - "docvariablelist": visit_docvariablelist, - "docvarlistentry": visit_docvarlistentry, - "docanchor": visit_docanchor, - "doctable": visit_doctable, - "docrow": visit_docrow, - "docentry": visit_docentry, - "docdotfile": visit_docdotfile, - "docdot": visit_docdot, - "graph": visit_docgraph, - "docblockquote": visit_docblockquote, - } + #methods: dict[str, Callable[[SphinxRenderer, Any], list[Node]]] = { + # "verbatim": visit_verbatim, + # "docdotfile": visit_docdotfile, + # "docdot": visit_docdot, + #} - def render_string(self, node: str) -> List[Node]: + def render_string(self, node: str) -> list[Node]: # Skip any nodes that are pure whitespace # Probably need a better way to do this as currently we're only doing # it skip whitespace between higher-level nodes, but this will also @@ -2608,32 +2577,26 @@ def render_string(self, node: str) -> List[Node]: return [nodes.Text(node)] return [] - 
def render(self, node, context: Optional[RenderContext] = None) -> List[Node]: + def render(self, node, context: Optional[RenderContext] = None) -> list[Node]: + assert self.context is not None if context is None: - self.context = cast(RenderContext, self.context) context = self.context.create_child_context(node) with WithContext(self, context): - result: List[Node] = [] - self.context = cast(RenderContext, self.context) + result: list[Node] = [] if not self.filter_.allow(self.context.node_stack): pass elif isinstance(node, str): result = self.render_string(node) else: - method = SphinxRenderer.methods.get(node.node_type, SphinxRenderer.visit_unknown) + assert type(node).__name__.startswith('Node_') + method = getattr(SphinxRenderer, 'visit_'+type(node).__name__, SphinxRenderer.visit_unknown) result = method(self, node) return result - def render_optional(self, node) -> List[Node]: + def render_optional(self, node) -> list[Node]: """Render a node that can be None.""" return self.render(node) if node else [] - def render_iterable(self, iterable: List) -> List[Node]: - output: List[Node] = [] - for entry in iterable: - output.extend(self.render(entry)) - return output - def setup(app: Sphinx) -> None: app.add_config_value("breathe_debug_trace_directives", False, "") diff --git a/tests/data/arange.xml b/tests/data/arange.xml index 00482ce2..8f687786 100644 --- a/tests/data/arange.xml +++ b/tests/data/arange.xml @@ -1,3 +1,6 @@ + + + Tensor @@ -13,6 +16,7 @@ options {} + Tensor @@ -39,6 +43,7 @@ c10::optional< bool > pin_memory + Tensor @@ -58,6 +63,7 @@ options {} + Tensor @@ -88,6 +94,7 @@ c10::optional< bool > pin_memory + Tensor @@ -111,6 +118,7 @@ options {} + Tensor @@ -145,5 +153,8 @@ c10::optional< bool > pin_memory + + + diff --git a/tests/data/ellipsis.xml b/tests/data/ellipsis.xml index 1e5e0b00..68861277 100644 --- a/tests/data/ellipsis.xml +++ b/tests/data/ellipsis.xml @@ -1,3 +1,6 @@ + + + double @@ -15,6 +18,8 @@ ... 
+ - + + diff --git a/tests/test_renderer.py b/tests/test_renderer.py index 8f159a18..c38317e1 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -4,18 +4,11 @@ import sphinx.locale import sphinx.addnodes import sphinx.environment -from breathe.parser import ( - Node_compounddefType, - Node_linkedTextType, - Node_memberdefType, - Node_paramType, - Node_refType, - MixedContainer, -) +from breathe import parser from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.filter import OpenFilter -from docutils import frontend, nodes, parsers, utils - +import docutils.parsers.rst +from docutils import frontend, nodes, utils from sphinx.testing.fixtures import ( test_params, app_params, @@ -24,10 +17,23 @@ sphinx_test_tempdir, rootdir, ) -from sphinx.testing.path import path sphinx.locale.init([], "") - +COMMON_ARGS_memberdefType = { + 'id': '', + 'prot': parser.DoxProtectionKind.public, + 'static': False, + 'location': parser.Node_locationType( + bodyend = 0, + bodyfile = '', + bodystart = 0, + column = 0, + declcolumn = 0, + declfile = '', + declline = 0, + file = '', + line = 0) +} @pytest.fixture(scope="function") def app(test_params, app_params, make_app, shared_result): @@ -36,7 +42,7 @@ def app(test_params, app_params, make_app, shared_result): """ args, kwargs = app_params assert "srcdir" in kwargs - kwargs["srcdir"].makedirs(exist_ok=True) + kwargs["srcdir"].mkdir(parents=True,exist_ok=True) (kwargs["srcdir"] / "conf.py").write_text("") app_ = make_app(*args, **kwargs) yield app_ @@ -52,36 +58,6 @@ def app(test_params, app_params, make_app, shared_result): shared_result.store(test_params["shared_result"], app_) -class WrappedDoxygenNode: - """ - A base class for test wrappers of Doxygen nodes. It allows setting all attributes via keyword arguments - in the constructor. 
- """ - - def __init__(self, cls, *args, **kwargs): - if cls: - cls.__init__(self, args) - for name, value in kwargs.items(): - if not hasattr(self, name): - raise AttributeError("invalid attribute " + name) - setattr(self, name, value) - - -class WrappedMixedContainer(MixedContainer, WrappedDoxygenNode): - """A test wrapper of Doxygen mixed container.""" - - def __init__(self, **kwargs): - MixedContainer.__init__(self, None, None, None, None) - WrappedDoxygenNode.__init__(self, None, **kwargs) - - -class WrappedCompoundDef(compounddefTypeSub, WrappedDoxygenNode): - """A test wrapper of Doxygen compound definition.""" - - def __init__(self, **kwargs): - WrappedDoxygenNode.__init__(self, compounddefTypeSub, **kwargs) - - class MockMemo: def __init__(self): self.title_styles = "" @@ -98,7 +74,7 @@ def __init__(self, app): env.temp_data["docname"] = "mock-doc" env.temp_data["breathe_project_info_factory"] = ProjectInfoFactory(app) env.temp_data["breathe_parser_factory"] = DoxygenParserFactory(app) - settings = frontend.OptionParser(components=(parsers.rst.Parser,)).get_default_values() + settings = frontend.OptionParser(components=(docutils.parsers.rst.Parser,)).get_default_values() settings.env = env self.document = utils.new_document("", settings) @@ -290,16 +266,17 @@ def render( def test_render_func(app): - member_def = Node_memberdefType( - kind="function", + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.function, definition="void foo", - type_="void", + type=parser.Node_linkedTextType(["void"]), name="foo", argsstring="(int)", - virt="non-virtual", + virt=parser.DoxVirtualKind.non_virtual, param=[ - Node_paramType(type_=Node_linkedTextType([WrappedMixedContainer(value="int")])) + parser.Node_paramType(type=parser.Node_linkedTextType(["int"])) ], + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext().startswith("void") @@ -321,28 +298,29 @@ def test_render_func(app): def test_render_typedef(app): - member_def = Node_memberdefType( - kind="typedef", definition="typedef int foo", type_="int", name="foo" + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.typedef, definition="typedef int foo", type=parser.Node_linkedTextType(["int"]), name="foo", **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "typedef int foo" def test_render_c_typedef(app): - member_def = Node_memberdefType( - kind="typedef", definition="typedef unsigned int bar", type_="unsigned int", name="bar" + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.typedef, definition="typedef unsigned int bar", type=parser.Node_linkedTextType(["unsigned int"]), name="bar", **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def, domain="c"), "desc_signature") assert signature.astext() == "typedef unsigned int bar" def test_render_c_function_typedef(app): - member_def = Node_memberdefType( - kind="typedef", + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.typedef, definition="typedef void* (*voidFuncPtr)(float, int)", - type_="void* (*", + type=parser.Node_linkedTextType(["void* (*"]), name="voidFuncPtr", argsstring=")(float, int)", + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def, domain="c"), "desc_signature") assert signature.astext().startswith("typedef void *") @@ -358,112 +336,120 @@ def test_render_c_function_typedef(app): def test_render_using_alias(app): - 
member_def = Node_memberdefType( - kind="typedef", definition="using foo = int", type_="int", name="foo" + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.typedef, definition="using foo = int", type=parser.Node_linkedTextType(["int"]), name="foo", **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "using foo = int" def test_render_const_func(app): - member_def = Node_memberdefType( - kind="function", + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.function, definition="void f", - type_="void", + type=parser.Node_linkedTextType(["void"]), name="f", argsstring="() const", - virt="non-virtual", - const="yes", + virt=parser.DoxVirtualKind.non_virtual, + const=True, + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert "_CPPv2NK1fEv" in signature["ids"] def test_render_lvalue_func(app): - member_def = Node_memberdefType( - kind="function", + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.function, definition="void f", - type_="void", + type=parser.Node_linkedTextType(["void"]), name="f", argsstring="() &", - virt="non-virtual", - refqual="lvalue", + virt=parser.DoxVirtualKind.non_virtual, + refqual=parser.DoxRefQualifierKind.lvalue, + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext().endswith("&") def test_render_rvalue_func(app): - member_def = Node_memberdefType( - kind="function", + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.function, definition="void f", - type_="void", + type=parser.Node_linkedTextType(["void"]), name="f", argsstring="() &&", - virt="non-virtual", - refqual="rvalue", + virt=parser.DoxVirtualKind.non_virtual, + refqual=parser.DoxRefQualifierKind.rvalue, + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext().endswith("&&") def test_render_const_lvalue_func(app): - member_def = Node_memberdefType( - kind="function", + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.function, definition="void f", - type_="void", + type=parser.Node_linkedTextType(["void"]), name="f", argsstring="() const &", - virt="non-virtual", - const="yes", - refqual="lvalue", + virt=parser.DoxVirtualKind.non_virtual, + const=True, + refqual=parser.DoxRefQualifierKind.lvalue, + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext().endswith("const &") def test_render_const_rvalue_func(app): - member_def = Node_memberdefType( - kind="function", + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.function, definition="void f", - type_="void", + type=parser.Node_linkedTextType(["void"]), name="f", argsstring="() const &&", - virt="non-virtual", - const="yes", - refqual="rvalue", + virt=parser.DoxVirtualKind.non_virtual, + const=True, + refqual=parser.DoxRefQualifierKind.rvalue, + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext().endswith("const &&") def test_render_variable_initializer(app): - member_def = Node_memberdefType( - kind="variable", + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.variable, definition="const int EOF", - type_="const int", + type=parser.Node_linkedTextType(["const int"]), name="EOF", - initializer=WrappedMixedContainer(value="= -1"), + 
initializer=parser.Node_linkedTextType(["= -1"]), + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "const int EOF = -1" def test_render_define_initializer(app): - member_def = Node_memberdefType( - kind="define", + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.define, name="MAX_LENGTH", - initializer=Node_linkedTextType([WrappedMixedContainer(value="100")]), + initializer=parser.Node_linkedTextType(["100"]), + **COMMON_ARGS_memberdefType ) signature_w_initializer = find_node( render(app, member_def, show_define_initializer=True), "desc_signature" ) assert signature_w_initializer.astext() == "MAX_LENGTH 100" - member_def_no_show = Node_memberdefType( - kind="define", + member_def_no_show = parser.Node_memberdefType( + kind=parser.DoxMemberKind.define, name="MAX_LENGTH_NO_INITIALIZER", - initializer=Node_linkedTextType([WrappedMixedContainer(value="100")]), + initializer=parser.Node_linkedTextType(["100"]), + **COMMON_ARGS_memberdefType ) signature_wo_initializer = find_node( @@ -474,7 +460,7 @@ def test_render_define_initializer(app): def test_render_define_no_initializer(app): sphinx.addnodes.setup(app) - member_def = Node_memberdefType(kind="define", name="USE_MILK") + member_def = parser.Node_memberdefType(kind=parser.DoxMemberKind.define, name="USE_MILK", **COMMON_ARGS_memberdefType) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "USE_MILK" @@ -483,14 +469,16 @@ def test_render_innergroup(app): refid = "group__innergroup" mock_compound_parser = MockCompoundParser( { - refid: Node_compounddefType( - kind="group", compoundname="InnerGroup", briefdescription="InnerGroup" + refid: parser.Node_compounddefType( + kind=parser.DoxCompoundKind.group, compoundname="InnerGroup", briefdescription=parser.Node_descriptionType(["InnerGroup"]), + id='', prot=parser.DoxProtectionKind.public ) } ) - ref = Node_refType(["InnerGroup"], refid=refid) - compound_def = Node_compounddefType( - kind="group", compoundname="OuterGroup", briefdescription="OuterGroup", innergroup=[ref] + ref = parser.Node_refType(["InnerGroup"], refid=refid) + compound_def = parser.Node_compounddefType( + kind=parser.DoxCompoundKind.group, compoundname="OuterGroup", briefdescription=parser.Node_descriptionType(["OuterGroup"]), innergroup=[ref], + id='', prot=parser.DoxProtectionKind.public ) assert all( el.astext() != "InnerGroup" @@ -529,20 +517,17 @@ def get_directive(app): def get_matches(datafile): - from breathe.parser.compoundsuper import sectiondefType from xml.dom import minidom argsstrings = [] with open(os.path.join(os.path.dirname(__file__), "data", datafile)) as fid: xml = fid.read() - doc = minidom.parseString(xml) - - sectiondef = sectiondefType.factory() - for child in doc.documentElement.childNodes: - sectiondef.buildChildren(child, "memberdef") - if getattr(child, "tagName", None) == "memberdef": - # Get the argsstring function declaration - argsstrings.append(child.getElementsByTagName("argsstring")[0].childNodes[0].data) + doc = parser.parse_str(xml) + assert isinstance(doc.value,parser.Node_DoxygenType) + + sectiondef = doc.value.compounddef[0].sectiondef[0] + for child in sectiondef.memberdef: + if child.argsstring: argsstrings.append(child.argsstring) matches = [[m, sectiondef] for m in sectiondef.memberdef] return argsstrings, matches diff --git a/tests/test_utils.py b/tests/test_utils.py index 5951dca8..5e43b3bd 100644 --- a/tests/test_utils.py +++ 
b/tests/test_utils.py @@ -2,8 +2,7 @@ from xml.dom import minidom from breathe.renderer.sphinxrenderer import get_param_decl, get_definition_without_template_args -from breathe.parser.compoundsuper import memberdefType -from breathe import path_handler +from breathe import path_handler, parser class TestUtils(TestCase): @@ -11,7 +10,12 @@ def test_param_decl(self): # From xml from: examples/specific/parameters.h xml = """ + + + + + x int a @@ -42,14 +46,17 @@ def test_param_decl(self): r [3] + + + + """ - doc = minidom.parseString(xml) + doc = parser.parse_str(xml) + assert isinstance(doc.value,parser.Node_DoxygenType) - memberdef = memberdefType.factory() - for child in doc.documentElement.childNodes: - memberdef.buildChildren(child, "param") + memberdef = doc.value.compounddef[0].sectiondef[0].memberdef[0] self.assertEqual(get_param_decl(memberdef.param[0]), "int a") self.assertEqual(get_param_decl(memberdef.param[1]), "float b") diff --git a/xml_parser_generator/module_template.c b/xml_parser_generator/module_template.c index 83ed3b00..0697b896 100644 --- a/xml_parser_generator/module_template.c +++ b/xml_parser_generator/module_template.c @@ -41,6 +41,7 @@ is broken into chunks. */ enum { CLASS_FROZEN_LIST = 0, + CLASS_FROZEN_LIST_ITR, CLASS_TAGGED_VALUE, //% for type in types|select('used_directly') CLASS__{$ type $}, @@ -296,8 +297,8 @@ static tagged_value *create_tagged_value(module_state *state) { static void tagged_value_dealloc(tagged_value *tv) { PyTypeObject *t = Py_TYPE(tv); PyObject_GC_UnTrack(tv); - Py_XDECREF(tv->values[0]); - Py_XDECREF(tv->values[1]); + Py_CLEAR(tv->values[0]); + Py_CLEAR(tv->values[1]); ((freefunc)PyType_GetSlot(t,Py_tp_free))(tv); Py_DECREF(t); } @@ -364,6 +365,14 @@ typedef struct { PyObject **content; } frozen_list; +/* A type doesn't satisfy collections.abc.Iterable unless is has an __iter__ +method */ +typedef struct { + PyObject_HEAD + size_t i; + frozen_list *fl; +} frozen_list_itr; + static void init_frozen_list(frozen_list *fl) { fl->size = 0; fl->capacity = 0; @@ -410,9 +419,11 @@ static PyObject **frozen_list_push_tagged_value(module_state *state,tagged_union static void frozen_list_dealloc(frozen_list *obj) { size_t i; + size_t size = obj->size; PyTypeObject *t = Py_TYPE(obj); PyObject_GC_UnTrack(obj); - for(i=0; isize; ++i) Py_XDECREF(obj->content[i]); + obj->size = 0; + for(i=0; icontent[i]); if(obj->content) PyMem_Free(obj->content); ((freefunc)PyType_GetSlot(t,Py_tp_free))(obj); Py_DECREF(t); @@ -458,11 +469,11 @@ static int frozen_list_fill(frozen_list *fl,PyObject *iterable) { return 0; } -void raise_no_keyword_allowed(const char *func) { +static void raise_no_keyword_allowed(const char *func) { PyErr_Format(PyExc_TypeError,"%s does not take any keyword arguments",func); } -PyObject *frozen_list_tp_new(PyTypeObject *subtype,PyObject *args,PyObject *kwds) { +static PyObject *frozen_list_tp_new(PyTypeObject *subtype,PyObject *args,PyObject *kwds) { frozen_list *r; if(kwds != NULL && PyDict_Size(kwds)) { raise_no_keyword_allowed("FrozenList.__new__"); @@ -484,7 +495,25 @@ PyObject *frozen_list_tp_new(PyTypeObject *subtype,PyObject *args,PyObject *kwds return (PyObject*)r; } +static PyObject *frozen_list_tp_iter(frozen_list *self) { + PyObject *m; + frozen_list_itr *r; + + m = PyState_FindModule(&module_def); + assert(m); + r = PyObject_GC_New(frozen_list_itr,((module_state*)PyModule_GetState(m))->classes[CLASS_FROZEN_LIST_ITR]); + if(r == NULL) return NULL; + r->i = 0; + r->fl = NULL; + if(self->size) { + r->fl = self; + Py_INCREF(self); + 
} + return (PyObject*)r; +} + static PyType_Slot frozen_list_slots[] = { + {Py_tp_iter,frozen_list_tp_iter}, {Py_tp_new,frozen_list_tp_new}, {Py_tp_dealloc,frozen_list_dealloc}, {Py_sq_length,frozen_list_size}, @@ -493,6 +522,55 @@ static PyType_Slot frozen_list_slots[] = { {0,NULL} }; +static PyObject *frozen_list_itr_tp_iter(PyObject *self) { + Py_INCREF(self); + return self; +} + +static PyObject *frozen_list_itr_tp_next(frozen_list_itr *self) { + PyObject *r; + + if(self->fl == NULL) return NULL; + + assert(self->i < self->fl->size); + r = self->fl->content[self->i++]; + if(self->i == self->fl->size) Py_CLEAR(self->fl); + Py_INCREF(r); + return r; +} + +static PyObject *frozen_list_itr_length_hint(frozen_list_itr *self,PyObject *Py_UNUSED(x)) { + if(self->fl == NULL) return PyLong_FromLong(0); + return PyLong_FromLong(self->fl->size - self->i); +} + +static PyMethodDef frozen_list_itr_methods[] = { + {"__length_hint__",(PyCFunction)frozen_list_itr_length_hint,METH_NOARGS,NULL}, + {NULL} +}; + +static void frozen_list_itr_dealloc(frozen_list_itr *obj) { + PyTypeObject *t = Py_TYPE(obj); + PyObject_GC_UnTrack(obj); + Py_CLEAR(obj->fl); + ((freefunc)PyType_GetSlot(t,Py_tp_free))(obj); + Py_DECREF(t); +} + +static int frozen_list_itr_traverse(frozen_list_itr *obj,visitproc visit,void *arg) { + if(obj->fl == NULL) return 0; + return visit((PyObject*)obj->fl,arg); +} + +static PyType_Slot frozen_list_itr_slots[] = { + {Py_tp_iter,frozen_list_itr_tp_iter}, + {Py_tp_iternext,frozen_list_itr_tp_next}, + {Py_tp_methods,frozen_list_itr_methods}, + {Py_tp_dealloc,frozen_list_itr_dealloc}, + {Py_tp_traverse,frozen_list_itr_traverse}, + {0,NULL} +}; + typedef enum { //% for n in element_names @@ -619,7 +697,7 @@ static void node_tagonly_common_dealloc(node_tagonly_common *obj,size_t field_co size_t i; PyTypeObject *t = Py_TYPE(obj); PyObject_GC_UnTrack(obj); - for(i=0; ifields[i]); + for(i=0; ifields[i]); ((freefunc)PyType_GetSlot(t,Py_tp_free))(obj); Py_DECREF(t); } @@ -655,10 +733,12 @@ static node_list_common *create_node_list_common(module_state *state,size_t clas static void node_list_common_dealloc(node_list_common *obj,size_t field_count) { size_t i; + size_t size = obj->base.size; PyTypeObject *t = Py_TYPE(obj); PyObject_GC_UnTrack(obj); - for(i=0; ifields[i]); - for(i=0; ibase.size; ++i) Py_XDECREF(obj->base.content[i]); + obj->base.size = 0; + for(i=0; ifields[i]); + for(i=0; ibase.content[i]); if(obj->base.content) PyMem_Free(obj->base.content); ((freefunc)PyType_GetSlot(t,Py_tp_free))(obj); Py_DECREF(t); @@ -876,6 +956,7 @@ static PyMemberDef tuple_item_members__{$ type $}[] = { }; PyObject *tuple_item_tp_new__{$ type $}(PyTypeObject *subtype,PyObject *args,PyObject *kwds) { + size_t i; tuple_item *r = (tuple_item*)((allocfunc)PyType_GetSlot(subtype,Py_tp_alloc))(subtype,0); if(r == NULL) return NULL; @@ -888,9 +969,11 @@ PyObject *tuple_item_tp_new__{$ type $}(PyTypeObject *subtype,PyObject *args,PyO &r->fields[{$ loop.index0 $}]{$ ',' if not loop.last $} //% endfor )) { + for(i=0; ifields[i] = NULL; Py_DECREF(r); return NULL; } + for(i=0; ifields[i]); return (PyObject*)r; } @@ -1107,6 +1190,7 @@ static int node_set_py_field(module_state *state,PyObject **field,PyObject *valu } *field = value; + Py_INCREF(value); return 1; } @@ -1307,8 +1391,10 @@ static int node_class_child__{$ type $}(parse_state *state,{$ 'PyObject **fields //% if type is used_directly static PyObject *node_class_tp_new__{$ type $}(PyTypeObject *subtype,PyObject *args,PyObject *kwds) { static const char *func_name = 
"Node_{$ type $}.__new__"; +//% if type|field_count PyObject *module; module_state *state; +//% endif node_{$ common_affix(type) $}_common *n; //% if type|field_count PyObject *key, *value; @@ -1329,12 +1415,15 @@ static PyObject *node_class_tp_new__{$ type $}(PyTypeObject *subtype,PyObject *a } //% endif +//% if type|field_count module = PyState_FindModule(&module_def); assert(module); state = PyModule_GetState(module); +//% endif - n = create_node_{$ common_affix(type) $}_common(state,CLASS__{$ type $},FIELD_COUNT__{$ type $}); + n = (node_{$ common_affix(type) $}_common*)((allocfunc)PyType_GetSlot(subtype,Py_tp_alloc))(subtype,0); if(n == NULL) return NULL; + init_node_{$ common_affix(type) $}_common(n,FIELD_COUNT__{$ type $}); //% if type is list_e if(frozen_list_fill(&n->base,PyTuple_GetItem(args,0))) { @@ -1557,6 +1646,7 @@ typedef struct { static spec_and_is_list class_specs[] = { {{FULL_MODULE_STR ".FrozenList",sizeof(frozen_list),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,frozen_list_slots},0}, + {{FULL_MODULE_STR ".FrozenListItr",sizeof(frozen_list_itr),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,frozen_list_itr_slots},0}, {{FULL_MODULE_STR ".TaggedValue",sizeof(tagged_value),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,tagged_value_slots},0}, //% for type in types|select('used_directly') {{FULL_MODULE_STR ".Node_{$ type $}",offsetof(node_{$ common_affix(type) $}_common,fields){% if type is has_fields %} + sizeof(PyObject*)*FIELD_COUNT__{$ type $}{% endif %},0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,node_class_slots__{$ type $}},{$ '1' if type is list_e else '0' $}}, @@ -1658,17 +1748,17 @@ static int parse_file(parse_state *state,PyObject *file) { } static int parse_str(parse_state *state,PyObject *data) { - int r = 0; + int r = -1; const char *s; Py_ssize_t len; PyObject *tmp = NULL; if(PyUnicode_Check(data)) { + char *s_tmp; tmp = PyUnicode_AsUTF8String(data); if(tmp == NULL) return -1; - s = PyByteArray_AsString(data); - assert(s); - len = PyByteArray_Size(data); + if(PyBytes_AsStringAndSize(tmp,&s_tmp,&len) < 0) goto end; + s = s_tmp; XML_SetEncoding(state->parser,"utf-8"); } else if(PyBytes_Check(data)) { char *s_tmp; @@ -1686,7 +1776,6 @@ static int parse_str(parse_state *state,PyObject *data) { while(len > EXPAT_CHUNK_SIZE) { if(XML_Parse(state->parser,s,EXPAT_CHUNK_SIZE,0) == XML_STATUS_ERROR) { raise_expat_error(state); - r = -1; goto end; } @@ -1696,10 +1785,11 @@ static int parse_str(parse_state *state,PyObject *data) { if(XML_Parse(state->parser,s,(int)len,1) == XML_STATUS_ERROR) { raise_expat_error(state); - r = -1; goto end; } + r = 0; + end: Py_XDECREF(tmp); return r; @@ -1775,12 +1865,12 @@ static PyMethodDef module_functions[] = { {"parse_file",impl_parse_file,METH_O,NULL}, {NULL}}; -static PyObject *load_enum_base(void) { +static PyObject *load_class_from_mod(const char *cls_name,const char *mod_name) { PyObject *cls; - PyObject *enum_m = PyImport_ImportModule("enum"); - if(enum_m == NULL) return NULL; - cls = PyObject_GetAttrString(enum_m,"Enum"); - Py_DECREF(enum_m); + PyObject *m = PyImport_ImportModule(mod_name); + if(m == NULL) return NULL; + cls = PyObject_GetAttrString(m,cls_name); + Py_DECREF(m); return cls; } @@ -1858,6 +1948,7 @@ static int create_enum( static int module_exec(PyObject *module) { PyObject *enum_base=NULL; + PyObject *frozen_list_bases=NULL; size_t i=0; size_t tu_i=0; size_t char_i=0; @@ -1882,7 +1973,7 @@ static int module_exec(PyObject *module) { } //% endif - enum_base = load_enum_base(); 
+ enum_base = load_class_from_mod("Enum","enum"); if(enum_base == NULL) goto error; //% for type in types|select('enumeration_t') if(create_enum( @@ -1898,13 +1989,16 @@ static int module_exec(PyObject *module) { enum_base = NULL; for(; iclasses[CLASS_FROZEN_LIST]); - Py_INCREF(state->classes[CLASS_FROZEN_LIST]); - state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,bases); - Py_DECREF(bases); + if(i == CLASS_FROZEN_LIST) { + state->classes[i] = (PyTypeObject*)PyType_FromSpec(&class_specs[i].spec); + + frozen_list_bases = PyTuple_New(1); + if(frozen_list_bases == NULL) goto error; + PyTuple_SetItem(frozen_list_bases,0,(PyObject*)state->classes[i]); + Py_INCREF(state->classes[i]); + } else if(class_specs[i].list_base) { + assert(frozen_list_bases != NULL); + state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,frozen_list_bases); } else { state->classes[i] = (PyTypeObject*)PyType_FromSpec(&class_specs[i].spec); } @@ -1916,10 +2010,13 @@ static int module_exec(PyObject *module) { Py_INCREF(state->classes[i]); } + Py_DECREF(frozen_list_bases); + return 0; error: Py_XDECREF(enum_base); + Py_XDECREF(frozen_list_bases); //% for type in types|select('enumeration_t') if(state->enum_values__{$ type $}[0] != NULL) decref_array(state->enum_values__{$ type $},ENUM_VALUE_COUNT__{$ type $}); //% endfor diff --git a/xml_parser_generator/stubs_template.pyi b/xml_parser_generator/stubs_template.pyi index a67a36f5..023cf70c 100644 --- a/xml_parser_generator/stubs_template.pyi +++ b/xml_parser_generator/stubs_template.pyi @@ -8,10 +8,15 @@ U = TypeVar('U') class SupportsRead(Protocol): def read(self, length: int, /) -> bytes | bytearray: ... +class FrozenListItr(Generic[T]): + def __iter__(self) -> FrozenListItr: ... + def __next__(self) -> T: ... + class FrozenList(Generic[T]): def __init__(self, items: Iterable[T]): ... def __len__(self) -> int: ... def __getitem__(self, i: SupportsIndex) -> T: ... + def __iter__(self) -> FrozenListItr[T]: ... class TaggedValue(Generic[T, U]): name: T @@ -98,7 +103,7 @@ ListItem_{$ type $} = ( {$ "invalid content type"|error $} //% endif //% if type is used_directly -class Node_{$ type $}: +class Node_{$ type $}{$ '(FrozenList['~list_item_type~'])' if type is list_e $}: {$ emit_fields(type) $} def __init__(self{$ ', __items: Iterable['~list_item_type~'], /' if type is list_e $} {%- if type|field_count -%}, * @@ -106,12 +111,6 @@ class Node_{$ type $}: {%- for f in type.all_fields() if f is optional %}, {$ f.py_name $}: {$ f.py_type(true) $} = ...{% endfor -%} {%- endif %}): ... -//% if type is list_e - def __len__(self) -> int: ... - - def __getitem__(self,i: SupportsIndex) -> {$ list_item_type $}: ... 
- -//% endif //% elif type is enumeration_t class {$ type $}(enum.Enum): //% for entry in type.children From 7e1a87abc68eaae86d2f07e94407ec07de91414b Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Mon, 20 Nov 2023 19:56:57 -0500 Subject: [PATCH 19/65] Replaced many references to old classes --- breathe/directives/__init__.py | 2 +- breathe/directives/class_like.py | 10 +- breathe/directives/function.py | 20 +- breathe/directives/index.py | 10 +- breathe/finder/__init__.py | 8 +- breathe/finder/compound.py | 16 +- breathe/finder/factory.py | 30 +- breathe/finder/index.py | 16 +- breathe/parser.py | 48 +- breathe/project.py | 2 +- breathe/renderer/__init__.py | 2 +- breathe/renderer/filter.py | 201 +++---- breathe/renderer/mask.py | 17 +- breathe/renderer/sphinxrenderer.py | 664 +++++++++++++++--------- tests/test_renderer.py | 8 +- tests/test_utils.py | 1 - xml_parser_generator/module_template.c | 172 ++++-- xml_parser_generator/schema.json | 2 +- xml_parser_generator/stubs_template.pyi | 15 +- 19 files changed, 772 insertions(+), 472 deletions(-) diff --git a/breathe/directives/__init__.py b/breathe/directives/__init__.py index 18a0f7f3..9b2bcdab 100644 --- a/breathe/directives/__init__.py +++ b/breathe/directives/__init__.py @@ -115,7 +115,7 @@ def render( ) except ParserError as e: return format_parser_error( - "doxygenclass", e.error, e.filename, self.state, self.lineno, True + "doxygenclass", e.message, e.filename, self.state, self.lineno, True ) except FileIOError as e: return format_parser_error( diff --git a/breathe/directives/class_like.py b/breathe/directives/class_like.py index aae52cab..e5e70112 100644 --- a/breathe/directives/class_like.py +++ b/breathe/directives/class_like.py @@ -7,7 +7,10 @@ from docutils.nodes import Node from docutils.parsers.rst.directives import unchanged_required, unchanged, flag -from typing import Any, List +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from breathe import parser class _DoxygenClassLikeDirective(BaseDirective): @@ -30,7 +33,7 @@ class _DoxygenClassLikeDirective(BaseDirective): } has_content = False - def run(self) -> List[Node]: + def run(self) -> list[Node]: name = self.arguments[0] try: @@ -47,8 +50,7 @@ def run(self) -> List[Node]: finder_filter = self.filter_factory.create_compound_finder_filter(name, self.kind) - # TODO: find a more specific type for the Doxygen nodes - matches: List[Any] = [] + matches: list[parser.Node] = [] finder.filter_(finder_filter, matches) if len(matches) == 0: diff --git a/breathe/directives/function.py b/breathe/directives/function.py index b46c54a1..40326318 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -1,7 +1,7 @@ from breathe.directives import BaseDirective from breathe.exception import BreatheError from breathe.file_state_cache import MTimeError -from breathe.parser import ParserError, FileIOError +from breathe import parser from breathe.project import ProjectError from breathe.renderer import format_parser_error, RenderContext from breathe.renderer.sphinxrenderer import WithContext @@ -217,11 +217,11 @@ def _create_function_signature( self.parser_factory.create_compound_parser(project_info), filter_, ) - except ParserError as e: + except parser.ParserError as e: return format_parser_error( - "doxygenclass", e.error, e.filename, self.state, self.lineno, True + "doxygenclass", e.message, e.filename, self.state, self.lineno, True ) - except FileIOError as e: + except parser.FileIOError as e: return format_parser_error( "doxygenclass", e.error, 
e.filename, self.state, self.lineno, False ) @@ -230,20 +230,20 @@ def _create_function_signature( node = node_stack[0] with WithContext(object_renderer, context): # this part should be kept in sync with visit_function in sphinxrenderer - name = node.get_name() + name = node.name # assume we are only doing this for C++ declarations declaration = " ".join( [ object_renderer.create_template_prefix(node), - "".join(n.astext() for n in object_renderer.render(node.get_type())), + "".join(n.astext() for n in object_renderer.render(node.type)), name, - node.get_argsstring(), + node.argsstring, ] ) - parser = cpp.DefinitionParser( + cpp_parser = cpp.DefinitionParser( declaration, location=self.get_source_info(), config=self.config ) - ast = parser.parse_declaration("function", "function") + ast = cpp_parser.parse_declaration("function", "function") return str(ast) def _resolve_function(self, matches, args: Optional[cpp.ASTParametersQualifiers], project_info): @@ -260,7 +260,7 @@ def _resolve_function(self, matches, args: Optional[cpp.ASTParametersQualifiers] {"no-link": ""}, project_info, self.state.document ) filter_ = self.filter_factory.create_outline_filter(text_options) - mask_factory = MaskFactory({"param": NoParameterNamesMask}) + mask_factory = MaskFactory({parser.Node_paramType: NoParameterNamesMask}) # Override the directive args for this render directive_args = self.directive_args[:] diff --git a/breathe/directives/index.py b/breathe/directives/index.py index eb39cf6a..3a2e846d 100644 --- a/breathe/directives/index.py +++ b/breathe/directives/index.py @@ -9,8 +9,6 @@ from docutils.nodes import Node from docutils.parsers.rst.directives import unchanged_required, flag -from typing import List - class RootDataObject: node_type = "root" @@ -23,12 +21,12 @@ class _BaseIndexDirective(BaseDirective): # information is present in the Directive class from the docutils framework that we'd have to # pass way too much stuff to a helper object to be reasonable. - def handle_contents(self, project_info): + def handle_contents(self, project_info) -> list[Node]: try: finder = self.finder_factory.create_finder(project_info) except ParserError as e: return format_parser_error( - self.name, e.error, e.filename, self.state, self.lineno, True + self.name, e.message, e.filename, self.state, self.lineno, True ) except FileIOError as e: return format_parser_error(self.name, e.error, e.filename, self.state, self.lineno) @@ -56,7 +54,7 @@ def handle_contents(self, project_info): node_list = object_renderer.render(context.node_stack[0], context) except ParserError as e: return format_parser_error( - self.name, e.error, e.filename, self.state, self.lineno, True + self.name, e.message, e.filename, self.state, self.lineno, True ) except FileIOError as e: return format_parser_error(self.name, e.error, e.filename, self.state, self.lineno) @@ -99,7 +97,7 @@ class AutoDoxygenIndexDirective(_BaseIndexDirective): } has_content = False - def run(self) -> List[Node]: + def run(self) -> list[Node]: """Extract the project info from the auto project info store and pass it to the helper method. 
""" diff --git a/breathe/finder/__init__.py b/breathe/finder/__init__.py index 7cd34aea..c1da13fe 100644 --- a/breathe/finder/__init__.py +++ b/breathe/finder/__init__.py @@ -1,12 +1,14 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import Generic, TYPE_CHECKING, TypeVar if TYPE_CHECKING: from breathe.project import ProjectInfo from breathe.finder.factory import DoxygenItemFinderFactory from breathe.renderer.filter import Filter +T = TypeVar('T') + def stack(element, list_): """Stack an element on to the start of a list and return as a new list""" @@ -16,8 +18,8 @@ def stack(element, list_): return output -class ItemFinder: - def __init__(self, project_info: ProjectInfo, data_object, item_finder_factory: DoxygenItemFinderFactory): +class ItemFinder(Generic[T]): + def __init__(self, project_info: ProjectInfo, data_object: T, item_finder_factory: DoxygenItemFinderFactory): self.data_object = data_object self.item_finder_factory: DoxygenItemFinderFactory = item_finder_factory self.project_info = project_info diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index e38d2c6b..a83c44c3 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -1,17 +1,19 @@ from breathe.finder import ItemFinder, stack from breathe.renderer.filter import Filter +from breathe import parser -class DoxygenTypeSubItemFinder(ItemFinder): +class DoxygenTypeSubItemFinder(ItemFinder[parser.Node_DoxygenType]): def filter_(self, ancestors, filter_: Filter, matches) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" node_stack = stack(self.data_object, ancestors) - compound_finder = self.item_finder_factory.create_finder(self.data_object.compounddef) + assert len(self.data_object.compounddef) == 1 + compound_finder = self.item_finder_factory.create_finder(self.data_object.compounddef[0]) compound_finder.filter_(node_stack, filter_, matches) -class CompoundDefTypeSubItemFinder(ItemFinder): +class CompoundDefTypeSubItemFinder(ItemFinder[parser.Node_compounddefType]): def filter_(self, ancestors, filter_: Filter, matches) -> None: """Finds nodes which match the filter and continues checks to children""" @@ -28,7 +30,7 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: finder.filter_(node_stack, filter_, matches) -class SectionDefTypeSubItemFinder(ItemFinder): +class SectionDefTypeSubItemFinder(ItemFinder[parser.Node_sectiondefType]): def filter_(self, ancestors, filter_: Filter, matches) -> None: """Find nodes which match the filter. 
Doesn't test this node, only its children""" @@ -41,7 +43,7 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: finder.filter_(node_stack, filter_, matches) -class MemberDefTypeSubItemFinder(ItemFinder): +class MemberDefTypeSubItemFinder(ItemFinder[parser.Node_memberdefType]): def filter_(self, ancestors, filter_: Filter, matches) -> None: data_object = self.data_object node_stack = stack(data_object, ancestors) @@ -49,14 +51,14 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: if filter_.allow(node_stack): matches.append(node_stack) - if data_object.kind == "enum": + if data_object.kind == parser.DoxMemberKind.enum: for value in data_object.enumvalue: value_stack = stack(value, node_stack) if filter_.allow(value_stack): matches.append(value_stack) -class RefTypeSubItemFinder(ItemFinder): +class RefTypeSubItemFinder(ItemFinder[parser.Node_refType]): def filter_(self, ancestors, filter_: Filter, matches) -> None: node_stack = stack(self.data_object, ancestors) if filter_.allow(node_stack): diff --git a/breathe/finder/factory.py b/breathe/finder/factory.py index 23b04137..15576d84 100644 --- a/breathe/finder/factory.py +++ b/breathe/finder/factory.py @@ -3,7 +3,7 @@ from breathe.finder import ItemFinder from breathe.finder import index as indexfinder from breathe.finder import compound as compoundfinder -from breathe.parser import DoxygenParserFactory +from breathe import parser from breathe.project import ProjectInfo from breathe.renderer.filter import Filter @@ -16,7 +16,7 @@ class _CreateCompoundTypeSubFinder: - def __init__(self, app: Sphinx, parser_factory: DoxygenParserFactory): + def __init__(self, app: Sphinx, parser_factory: parser.DoxygenParserFactory): self.app = app self.parser_factory = parser_factory @@ -26,12 +26,12 @@ def __call__(self, project_info: ProjectInfo, *args) -> indexfinder.CompoundType class DoxygenItemFinderFactory: - def __init__(self, finders: dict[str, ItemFinderCreator], project_info: ProjectInfo): + def __init__(self, finders: dict[type[parser.Node], ItemFinderCreator], project_info: ProjectInfo): self.finders = finders self.project_info = project_info def create_finder(self, data_object) -> ItemFinder: - return self.finders[data_object.node_type](self.project_info, data_object, self) + return self.finders[type(data_object)](self.project_info, data_object, self) class _FakeParentNode: @@ -43,7 +43,7 @@ def __init__(self, root, item_finder_factory: DoxygenItemFinderFactory) -> None: self._root = root self.item_finder_factory = item_finder_factory - def filter_(self, filter_: Filter, matches) -> None: + def filter_(self, filter_: Filter, matches: list[parser.Node]) -> None: """Adds all nodes which match the filter into the matches list""" item_finder = self.item_finder_factory.create_finder(self._root) @@ -54,7 +54,7 @@ def root(self): class FinderFactory: - def __init__(self, app: Sphinx, parser_factory: DoxygenParserFactory): + def __init__(self, app: Sphinx, parser_factory: parser.DoxygenParserFactory): self.app = app self.parser_factory = parser_factory self.parser = parser_factory.create_index_parser() @@ -64,15 +64,15 @@ def create_finder(self, project_info: ProjectInfo) -> Finder: return self.create_finder_from_root(root, project_info) def create_finder_from_root(self, root, project_info: ProjectInfo) -> Finder: - finders: dict[str, ItemFinderCreator] = { - "doxygen": indexfinder.DoxygenTypeSubItemFinder, - "compound": _CreateCompoundTypeSubFinder(self.app, self.parser_factory), - "member": 
indexfinder.MemberTypeSubItemFinder, - "doxygendef": compoundfinder.DoxygenTypeSubItemFinder, - "compounddef": compoundfinder.CompoundDefTypeSubItemFinder, - "sectiondef": compoundfinder.SectionDefTypeSubItemFinder, - "memberdef": compoundfinder.MemberDefTypeSubItemFinder, - "ref": compoundfinder.RefTypeSubItemFinder, + finders: dict[type[parser.Node], ItemFinderCreator] = { + parser.Node_DoxygenTypeIndex: indexfinder.DoxygenTypeSubItemFinder, + parser.Node_CompoundType: _CreateCompoundTypeSubFinder(self.app, self.parser_factory), + parser.Node_MemberType: indexfinder.MemberTypeSubItemFinder, + parser.Node_DoxygenType: compoundfinder.DoxygenTypeSubItemFinder, + parser.Node_compounddefType: compoundfinder.CompoundDefTypeSubItemFinder, + parser.Node_sectiondefType: compoundfinder.SectionDefTypeSubItemFinder, + parser.Node_memberdefType: compoundfinder.MemberDefTypeSubItemFinder, + parser.Node_refType: compoundfinder.RefTypeSubItemFinder, } item_finder_factory = DoxygenItemFinderFactory(finders, project_info) return Finder(root, item_finder_factory) diff --git a/breathe/finder/index.py b/breathe/finder/index.py index 0a142289..d67e369d 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -1,25 +1,25 @@ from breathe.finder import ItemFinder, stack from breathe.renderer.filter import Filter, FilterFactory -from breathe.parser import DoxygenCompoundParser +from breathe import parser from sphinx.application import Sphinx from typing import Any -class DoxygenTypeSubItemFinder(ItemFinder): +class DoxygenTypeSubItemFinder(ItemFinder[parser.Node_DoxygenTypeIndex]): def filter_(self, ancestors, filter_: Filter, matches) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" - compounds = self.data_object.get_compound() + compounds = self.data_object.compound node_stack = stack(self.data_object, ancestors) for compound in compounds: compound_finder = self.item_finder_factory.create_finder(compound) compound_finder.filter_(node_stack, filter_, matches) -class CompoundTypeSubItemFinder(ItemFinder): - def __init__(self, app: Sphinx, compound_parser: DoxygenCompoundParser, *args): +class CompoundTypeSubItemFinder(ItemFinder[parser.Node_CompoundType]): + def __init__(self, app: Sphinx, compound_parser: parser.DoxygenCompoundParser, *args): super().__init__(*args) self.filter_factory = FilterFactory(app) @@ -40,7 +40,7 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: matches.append(node_stack) # Descend to member children - members = self.data_object.get_member() + members = self.data_object.member # TODO: find a more precise type for the Doxygen nodes member_matches: list[Any] = [] for member in members: @@ -55,7 +55,7 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: for member_stack in member_matches: ref_filter = self.filter_factory.create_id_filter( - "memberdef", member_stack[0].refid + parser.Node_memberdefType, member_stack[0].refid ) finder.filter_(node_stack, ref_filter, matches) else: @@ -65,7 +65,7 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: finder.filter_(node_stack, filter_, matches) -class MemberTypeSubItemFinder(ItemFinder): +class MemberTypeSubItemFinder(ItemFinder[parser.Node_memberdefType]): def filter_(self, ancestors, filter_: Filter, matches) -> None: node_stack = stack(self.data_object, ancestors) diff --git a/breathe/parser.py b/breathe/parser.py index cccc6cb2..baac33b4 100644 --- a/breathe/parser.py +++ b/breathe/parser.py @@ -1,3 +1,5 @@ +from __future__ import 
annotations + from breathe import file_state_cache, path_handler from breathe.project import ProjectInfo @@ -5,18 +7,30 @@ from sphinx.application import Sphinx +from typing import overload + + class ParserError(RuntimeError): - def __init__(self, error: str, filename: str): - super().__init__(error) + def __init__(self, message: str, filename: str, lineno: int | None = None): + super().__init__(message,filename,lineno) - self.error = error - self.filename = filename + @property + def message(self) -> str: + return self.args[0] + + @property + def lineno(self) -> int | None: + return self.args[1] + + @property + def filename(self) -> str: + return self.args[2] def __str__(self): - # TODO: update _parser.ParseError to store the line number and message - # as separate fields for better formatting here - return f"file {self.filename}: {self.error}" + if self.lineno is None: + return f"file {self.filename}: {self.message}" + return f"file {self.filename}:{self.lineno}: {self.message}" class FileIOError(RuntimeError): @@ -28,7 +42,7 @@ def __init__(self, error: str, filename: str): class Parser: - def __init__(self, app: Sphinx, cache): + def __init__(self, app: Sphinx, cache: dict[str, Node_DoxygenTypeIndex | Node_DoxygenType]): self.app = app self.cache = cache @@ -43,10 +57,10 @@ def _parse_common(self,filename: str, right_tag: str) -> Node_DoxygenTypeIndex | result = parse_file(file) if result.name != right_tag: raise ParserError(f'expected "{right_tag}" root element, not "{result.name}"',filename) - self.cache[filename] = result + self.cache[filename] = result.value return result.value except ParseError as e: - raise ParserError(str(e), filename) + raise ParserError(e.message, filename, e.lineno) except IOError as e: raise FileIOError(str(e), filename) @@ -92,3 +106,17 @@ def create_index_parser(self) -> DoxygenIndexParser: def create_compound_parser(self, project_info: ProjectInfo) -> DoxygenCompoundParser: return DoxygenCompoundParser(self.app, self.cache, project_info) + + +@overload +def tag_name_value(x: TaggedValue[T, U]) -> tuple[T, U]: ... + +@overload +def tag_name_value(x: str) -> tuple[None,str]: ... + +@overload +def tag_name_value(x: TaggedValue[T, U] | str) -> tuple[T | None, U | str]: ... + +def tag_name_value(x): + if isinstance(x,str): return None,x + return x.name,x.value diff --git a/breathe/project.py b/breathe/project.py index 865236e8..dd5b6453 100644 --- a/breathe/project.py +++ b/breathe/project.py @@ -113,7 +113,7 @@ def __init__(self, app: Sphinx): # Assume general build directory is the doctree directory without the last component. # We strip off any trailing slashes so that dirname correctly drops the last part. # This can be overridden with the breathe_build_directory config variable - self._default_build_dir = os.path.dirname(app.doctreedir.rstrip(os.sep)) + self._default_build_dir = str(app.doctreedir.parent) self.project_count = 0 self.project_info_store: Dict[str, ProjectInfo] = {} self.project_info_for_auto_store: Dict[str, AutoProjectInfo] = {} diff --git a/breathe/renderer/__init__.py b/breathe/renderer/__init__.py index 497d8423..fe53a5da 100644 --- a/breathe/renderer/__init__.py +++ b/breathe/renderer/__init__.py @@ -2,7 +2,7 @@ import textwrap -def format_parser_error(name, error, filename, state, lineno, do_unicode_warning): +def format_parser_error(name: str, error: str, filename: str, state, lineno: int, do_unicode_warning: bool = False) -> list[nodes.Node]: warning = '%s: Unable to parse xml file "%s". 
' % (name, filename) explanation = "Reported error: %s. " % error diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 31f90ce3..4d40fc8d 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -22,13 +22,13 @@ hierarchies that encapsulate the required test and take a node (with its context) and return True or False. -If you wanted a test which figures out if a node has the node_type 'memberdef' you might create the -following object hierarchy: +If you wanted a test which figures out if a node has the 'kind' attribute set to 'function' you +might create the following object hierarchy: - node_is_memberdef = InFilter(AttributeAccessor(Node(), 'node_type'), ['memberdef']) + kind_is_function = InFilter(AttributeAccessor(Node(), 'kind'), ['function']) -This reads from the inside out, as get the node, then get the node_type attribute from it, and see -if the value of the attribute is in the list ['memberdef']. +This reads from the inside out, as get the node, then get the 'kind' attribute from it, and see if +the value of the attribute is in the list ['function']. The Node() is called a 'Selector'. Parent() is also a selector. It means given the current context, work with the parent of the current node rather than the node itself. This allows you to frame tests @@ -74,14 +74,14 @@ An example of a finder filter is: AndFilter( - InFilter(NodeTypeAccessor(Node()), ["compound"]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_CompoundType]), InFilter(KindAccessor(Node()), ["group"]), InFilter(NameAccessor(Node()), ["mygroup"]) ) -This says, return True for all the nodes of node_type 'compound' with 'kind' set to 'group' which -have the name 'mygroup'. It returns false for everything else, but when a node matching this is -found then it is added to the matches list by the code above. +This says, return True for all the nodes of type 'parser.Node_CompoundType' with 'kind' set to +'group' which have the name 'mygroup'. It returns false for everything else, but when a node +matching this is found then it is added to the matches list by the code above. It is therefore relatively easy to write finder filters. If you have two separate node filters like the one above and you want to match on both of them then you can do: @@ -98,34 +98,34 @@ ~~~~~~~~~~~~~~~ Content filters are harder than the finder filters as they are responsible for halting the iteration -down the hierarchy if they return false. This means that if you're interested in memberdef nodes -with a particular attribute then you have to check for that but also include a clause which allows -all other non-memberdef nodes to pass through as you don't want to interrupt them. +down the hierarchy if they return false. This means that if you're interested in Node_memberdefType +nodes with a particular attribute then you have to check for that but also include a clause which +allows all other non-Node_memberdefType nodes to pass through as you don't want to interrupt them. 
This means you end up with filters like this:
 
     OrFilter(
         AndFilter(
-            InFilter(NodeTypeAccessor(Node()), ["compound"]),
+            InFilter(NodeTypeAccessor(Node()), [parser.Node_memberdefType]),
             InFilter(KindAccessor(Node()), ["group"]),
             InFilter(NameAccessor(Node()), ["mygroup"])
             ),
         NotFilter(
             AndFilter(
-                InFilter(NodeTypeAccessor(Node()), ["compound"]),
+                InFilter(NodeTypeAccessor(Node()), [parser.Node_memberdefType]),
                 InFilter(KindAccessor(Node()), ["group"]),
                 )
             )
         )
 
-Which is to say that we want to let through a compound, with kind group, with name 'mygroup' but
-we're also happy if the node is **not** a compund with kind group. Really we just don't want to let
-through any compounds with kind group with name other than 'mygroup'. As such, we can rephrase this
-as:
+Which is to say that we want to let through a Node_memberdefType, with kind group, with name
+'mygroup' but we're also happy if the node is **not** a Node_memberdefType with kind group. Really
+we just don't want to let through any Node_memberdefTypes with kind group with name other than
+'mygroup'. As such, we can rephrase this as:
 
     NotFilter(
         AndFilter(
-            InFilter(NodeTypeAccessor(Node()), ["compound"]),
+            InFilter(NodeTypeAccessor(Node()), [parser.Node_memberdefType]),
             InFilter(KindAccessor(Node()), ["group"]),
             NotFilter(InFilter(NameAccessor(Node()), ["mygroup"]))
             )
@@ -134,13 +134,14 @@ Using logical manipulation we can rewrite this as:
 
     OrFilter(
-        NotFilter(InFilter(NodeTypeAccessor(Node()), ["compound"])),
+        NotFilter(InFilter(NodeTypeAccessor(Node()), [parser.Node_memberdefType])),
         NotFilter(InFilter(KindAccessor(Node()), ["group"])),
         InFilter(NameAccessor(Node()), ["mygroup"])
         )
 
-We reads: allow if it isn't a compound, or if it is a compound but doesn't have a 'kind' of 'group',
-but if it is a compound and has a 'kind' of 'group then only allow it if it is named 'mygroup'.
+Which reads: allow if it isn't a Node_memberdefType, or if it is a Node_memberdefType but doesn't
+have a 'kind' of 'group', but if it is a Node_memberdefType and has a 'kind' of 'group' then only
+allow it if it is named 'mygroup'.
 
 
 Helper Syntax
@@ -171,12 +172,12 @@ AndFilters, OrFilters and NotFilters respectively. We have to override the binary
 operators as the actual 'and', 'or' and 'not' operators cannot be overridden. So:
 
-    (node.node_type == 'compound') & (node.name == 'mygroup')
+    (node.node_type == parser.Node_CompoundType) & (node.name == 'mygroup')
 
 Translates to:
 
     AndFilter(
-        InFilter(NodeTypeAccessor(Node()), ["compound"])),
+        InFilter(NodeTypeAccessor(Node()), [parser.Node_CompoundType]),
         InFilter(NameAccessor(Node()), ["mygroup"])
         )
 
@@ -191,20 +192,23 @@ we have to include additional parenthesis in the expressions to group them as we want.
So instead of writing: - node.node_type == 'compound' & node.name == 'mygroup' + node.node_type == parser.Node_CompoundType & node.name == 'mygroup' We have to write: - (node.node_type == 'compound') & (node.name == 'mygroup') + (node.node_type == parser.Node_CompoundType) & (node.name == 'mygroup') """ -from breathe import path_handler +from __future__ import annotations + +from breathe import path_handler, parser from sphinx.application import Sphinx import os from typing import Any, Callable, Dict, List +from collections.abc import Iterable class UnrecognisedKindError(Exception): @@ -226,7 +230,7 @@ def node_type(self): @property def kind(self): - return AttributeAccessor(self, "kind") + return KindAccessor(self) @property def node_name(self): @@ -269,11 +273,17 @@ class Parent(Selector): def __call__(self, node_stack): return node_stack[1] + def __repr__(self) -> str: + return 'Parent()' + class Node(Selector): def __call__(self, node_stack): return node_stack[0] + def __repr__(self) -> str: + return 'Node()' + ############################################################################### # Accessors @@ -287,19 +297,19 @@ def __init__(self, selector: Selector) -> None: def __call__(self, node_stack): raise NotImplementedError - def __eq__(self, value: str) -> "InFilter": # type: ignore + def __eq__(self, value: Any) -> InFilter: return InFilter(self, [value]) - def __ne__(self, value: str) -> "NotFilter": # type: ignore + def __ne__(self, value: Any) -> NotFilter: return NotFilter(InFilter(self, [value])) - def is_one_of(self, collection: List[str]) -> "InFilter": + def is_one_of(self, collection: Iterable[Any]) -> InFilter: return InFilter(self, collection) - def has_content(self) -> "HasContentFilter": + def has_content(self) -> HasContentFilter: return HasContentFilter(self) - def endswith(self, options: List[str]) -> "EndsWithFilter": + def endswith(self, options: List[str]) -> EndsWithFilter: return EndsWithFilter(self, options) @@ -319,22 +329,16 @@ def __call__(self, node_stack) -> str: class NodeTypeAccessor(Accessor): - def __call__(self, node_stack) -> str: - data_object = self.selector(node_stack) - try: - return data_object.node_type - except AttributeError as e: - # Horrible hack to silence errors on filtering unicode objects - # until we fix the parsing - if type(data_object) == str: - return "unicode" - else: - raise e + def __call__(self, node_stack) -> type: + return type(self.selector(node_stack)) + + def __repr__(self) -> str: + return f'NodeTypeAccessor({self.selector!r})' class KindAccessor(Accessor): - def __call__(self, node_stack) -> str: - return self.selector(node_stack).kind + def __call__(self, node_stack): + return self.selector(node_stack).kind.value class AttributeAccessor(Accessor): @@ -350,6 +354,9 @@ def __init__(self, selector: Selector, attribute_name: str) -> None: def __call__(self, node_stack) -> Any: return getattr(self.selector(node_stack), self.attribute_name) + def __repr__(self) -> str: + return f'AttributeAccessor({self.selector!r}, {self.attribute_name!r})' + class LambdaAccessor(Accessor): def __init__(self, selector: Selector, func: Callable[[Any], str]): @@ -374,10 +381,10 @@ class Filter: def allow(self, node_stack) -> bool: raise NotImplementedError - def __and__(self, other: "Filter") -> "AndFilter": + def __and__(self, other: "Filter") -> AndFilter: return AndFilter(self, other) - def __or__(self, other: "Filter") -> "OrFilter": + def __or__(self, other: "Filter") -> OrFilter: return OrFilter(self, other) def __invert__(self) -> 
"NotFilter": @@ -407,7 +414,7 @@ class EndsWithFilter(Filter): iterable parameter. """ - def __init__(self, accessor: Accessor, options: List[str]): + def __init__(self, accessor: Accessor, options: list[str]): self.accessor = accessor self.options = options @@ -422,14 +429,18 @@ def allow(self, node_stack) -> bool: class InFilter(Filter): """Checks if what is returned from the accessor is 'in' in the members""" - def __init__(self, accessor: Accessor, members: List[str]) -> None: + def __init__(self, accessor: Accessor, members: Iterable[Any]) -> None: self.accessor = accessor - self.members = members + self.members = frozenset(members) def allow(self, node_stack) -> bool: name = self.accessor(node_stack) return name in self.members + def __repr__(self) -> str: + mem_str = ', '.join(repr(m) for m in self.members) + return f'InFilter({self.accessor!r}, {{{mem_str}}})' + class GlobFilter(Filter): def __init__(self, accessor: Accessor, glob): @@ -502,7 +513,10 @@ def allow(self, node_stack) -> bool: class AndFilter(Filter): def __init__(self, *filters: Filter): - self.filters = filters + self.filters = [] + for f in filters: + if isinstance(f,AndFilter): self.filters.extend(f.filters) + else: self.filters.append(f) def allow(self, node_stack) -> bool: # If any filter returns False then return False @@ -511,12 +525,19 @@ def allow(self, node_stack) -> bool: return False return True + def __repr__(self) -> str: + args = ', '.join(map(repr,self.filters)) + return f'AndFilter({args})' + class OrFilter(Filter): """Provides a short-cutted 'or' operation between two filters""" def __init__(self, *filters: Filter): - self.filters = filters + self.filters = [] + for f in filters: + if isinstance(f,OrFilter): self.filters.extend(f.filters) + else: self.filters.append(f) def allow(self, node_stack) -> bool: # If any filter returns True then return True @@ -525,6 +546,10 @@ def allow(self, node_stack) -> bool: return True return False + def __repr__(self) -> str: + args = ', '.join(map(repr,self.filters)) + return f'OrFilter({args})' + class IfFilter(Filter): def __init__(self, condition, if_true, if_false): @@ -579,14 +604,14 @@ class FilterFactory: def __init__(self, app: Sphinx) -> None: self.app = app - def create_render_filter(self, kind: str, options: Dict[str, Any]) -> Filter: + def create_render_filter(self, kind: str, options: dict[str, Any]) -> Filter: """Render filter for group & namespace blocks""" if kind not in ["group", "page", "namespace"]: raise UnrecognisedKindError(kind) # Generate new dictionary from defaults - filter_options = dict((entry, "") for entry in self.app.config.breathe_default_members) + filter_options: dict[str, Any] = {entry: "" for entry in self.app.config.breathe_default_members} # Update from the actual options filter_options.update(options) @@ -618,11 +643,11 @@ def create_render_filter(self, kind: str, options: Dict[str, Any]) -> Filter: & self.create_outline_filter(filter_options) ) - def create_class_filter(self, target: str, options: Dict[str, Any]) -> Filter: + def create_class_filter(self, target: str, options: dict[str, Any]) -> Filter: """Content filter for classes based on various directive options""" # Generate new dictionary from defaults - filter_options = dict((entry, "") for entry in self.app.config.breathe_default_members) + filter_options: dict[str, Any] = {entry: "" for entry in self.app.config.breathe_default_members} # Update from the actual options filter_options.update(options) @@ -651,7 +676,7 @@ def create_innerclass_filter(self, options: 
Dict[str, Any], outerclass: str = "" parent_is_compounddef = parent.node_type == "compounddef" parent_is_class = parent.kind.is_one_of(["class", "struct", "interface"]) - allowed = set() + allowed: set[str] = set() all_options = { "protected-members": "protected", "private-members": "private", @@ -698,8 +723,8 @@ def create_show_filter(self, options: Dict[str, Any]) -> Filter: except KeyError: # Allow through everything except the header-file includes nodes return OrFilter( - NotFilter(InFilter(NodeTypeAccessor(Parent()), ["compounddef"])), - NotFilter(InFilter(NodeTypeAccessor(Node()), ["inc"])), + NotFilter(InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType])), + NotFilter(InFilter(NodeTypeAccessor(Node()), [parser.Node_incType])), ) if text == "header-file": @@ -708,8 +733,8 @@ def create_show_filter(self, options: Dict[str, Any]) -> Filter: # Allow through everything except the header-file includes nodes return OrFilter( - NotFilter(InFilter(NodeTypeAccessor(Parent()), ["compounddef"])), - NotFilter(InFilter(NodeTypeAccessor(Node()), ["inc"])), + NotFilter(InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType])), + NotFilter(InFilter(NodeTypeAccessor(Node()), [parser.Node_incType])), ) def _create_description_filter( @@ -836,7 +861,7 @@ def create_class_member_filter(self, options: Dict[str, Any]) -> Filter: def create_outline_filter(self, options: Dict[str, Any]) -> Filter: if "outline" in options: node = Node() - return ~node.node_type.is_one_of(["description", "inc"]) + return ~node.node_type.is_one_of([parser.Node_descriptionType, parser.Node_incType]) else: return OpenFilter() @@ -853,7 +878,7 @@ def create_file_filter(self, filename: str, options: Dict[str, Any]) -> Filter: # the NotFilter this chunk always returns true and # so does not affect the result of the filtering AndFilter( - InFilter(NodeTypeAccessor(Node()), ["compounddef"]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_compounddefType]), InFilter(KindAccessor(Node()), ["file"]), FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename), Gather(LambdaAccessor(Node(), lambda x: x.namespaces), valid_names), @@ -868,8 +893,8 @@ def create_file_filter(self, filename: str, options: Dict[str, Any]) -> Filter: # required as the location attribute for the # namespace in the xml is unreliable. 
AndFilter( - InFilter(NodeTypeAccessor(Parent()), ["compounddef"]), - InFilter(NodeTypeAccessor(Node()), ["ref"]), + InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_refType]), InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]), NotFilter( InFilter( @@ -883,8 +908,8 @@ def create_file_filter(self, filename: str, options: Dict[str, Any]) -> Filter: # namespace that is going to be rendered as they will be # rendered with that namespace and we don't want them twice AndFilter( - InFilter(NodeTypeAccessor(Parent()), ["compounddef"]), - InFilter(NodeTypeAccessor(Node()), ["ref"]), + InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_refType]), InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]), NamespaceFilter( NamespaceAccessor(Parent()), @@ -898,7 +923,7 @@ def create_file_filter(self, filename: str, options: Dict[str, Any]) -> Filter: # cross into a namespace xml file which has entries # from multiple files in it AndFilter( - InFilter(NodeTypeAccessor(Node()), ["memberdef"]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_memberdefType]), NotFilter( FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename) ), @@ -914,7 +939,7 @@ def create_file_filter(self, filename: str, options: Dict[str, Any]) -> Filter: # location even if they namespace is spread over # multiple files AndFilter( - InFilter(NodeTypeAccessor(Node()), ["compounddef"]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_compounddefType]), NotFilter(InFilter(KindAccessor(Node()), ["namespace"])), NotFilter( FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename) @@ -940,17 +965,17 @@ def create_content_filter(self, kind: str, options: Dict[str, Any]) -> Filter: node = Node() # Filter for public memberdefs - node_is_memberdef = node.node_type == "memberdef" + node_is_memberdef = node.node_type == parser.Node_memberdefType node_is_public = node.prot == "public" public_members = node_is_memberdef & node_is_public # Filter for public innerclasses parent = Parent() - parent_is_compounddef = parent.node_type == "compounddef" + parent_is_compounddef = parent.node_type == parser.Node_compounddefType parent_is_class = parent.kind == kind - node_is_innerclass = (node.node_type == "ref") & (node.node_name == "innerclass") + node_is_innerclass = (node.node_type == parser.Node_refType) & (node.node_name == "innerclass") node_is_public = node.prot == "public" public_innerclass = ( @@ -963,16 +988,16 @@ def create_index_filter(self, options: Dict[str, Any]) -> Filter: filter_ = AndFilter( NotFilter( AndFilter( - InFilter(NodeTypeAccessor(Parent()), ["compounddef"]), - InFilter(NodeTypeAccessor(Node()), ["ref"]), + InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_refType]), InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]), ) ), NotFilter( AndFilter( - InFilter(NodeTypeAccessor(Parent()), ["compounddef"]), + InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), InFilter(KindAccessor(Parent()), ["group"]), - InFilter(NodeTypeAccessor(Node()), ["sectiondef"]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_sectiondefType]), InFilter(KindAccessor(Node()), ["func"]), ) ), @@ -985,13 +1010,13 @@ def create_open_filter(self) -> Filter: return OpenFilter() - def create_id_filter(self, node_type: str, refid: str) -> Filter: + def 
create_id_filter(self, node_type: type, refid: str) -> Filter: node = Node() return (node.node_type == node_type) & (node.id == refid) def create_file_finder_filter(self, filename: str) -> Filter: filter_ = AndFilter( - InFilter(NodeTypeAccessor(Node()), ["compounddef"]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_compounddefType]), InFilter(KindAccessor(Node()), ["file"]), FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename), ) @@ -1003,11 +1028,11 @@ def create_member_finder_filter(self, namespace: str, name: str, kind: str) -> F node = Node() parent = Parent() - node_matches = (node.node_type == "member") & (node.kind == kind) & (node.name == name) + node_matches = (node.node_type == parser.Node_MemberType) & (node.kind == kind) & (node.name == name) if namespace: parent_matches = ( - (parent.node_type == "compound") + (parent.node_type == parser.Node_CompoundType) & ( (parent.kind == "namespace") | (parent.kind == "class") @@ -1021,7 +1046,7 @@ def create_member_finder_filter(self, namespace: str, name: str, kind: str) -> F is_implementation_file = parent.name.endswith( self.app.config.breathe_implementation_filename_extensions ) - parent_is_compound = parent.node_type == "compound" + parent_is_compound = parent.node_type == parser.Node_CompoundType parent_is_file = (parent.kind == "file") & (~is_implementation_file) parent_is_not_file = parent.kind != "file" @@ -1031,7 +1056,7 @@ def create_member_finder_filter(self, namespace: str, name: str, kind: str) -> F def create_function_and_all_friend_finder_filter(self, namespace: str, name: str) -> Filter: parent = Parent() - parent_is_compound = parent.node_type == "compound" + parent_is_compound = parent.node_type == parser.Node_CompoundType parent_is_group = parent.kind == "group" function_filter = self.create_member_finder_filter(namespace, name, "function") @@ -1045,13 +1070,13 @@ def create_enumvalue_finder_filter(self, name: str) -> Filter: """Returns a filter which looks for an enumvalue with the specified name.""" node = Node() - return (node.node_type == "enumvalue") & (node.name == name) + return (node.node_type == parser.Node_enumvalueType) & (node.name == name) def create_compound_finder_filter(self, name: str, kind: str) -> Filter: """Returns a filter which looks for a compound with the specified name and kind.""" node = Node() - return (node.node_type == "compound") & (node.kind == kind) & (node.name == name) + return (node.node_type == parser.Node_CompoundType) & (node.kind == kind) & (node.name == name) def create_finder_filter(self, kind: str, name: str) -> Filter: """Returns a filter which looks for the compound node from the index which is a group node @@ -1063,20 +1088,20 @@ def create_finder_filter(self, kind: str, name: str) -> Filter: if kind == "group": filter_ = AndFilter( - InFilter(NodeTypeAccessor(Node()), ["compound"]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_CompoundType]), InFilter(KindAccessor(Node()), ["group"]), InFilter(NameAccessor(Node()), [name]), ) elif kind == "page": filter_ = AndFilter( - InFilter(NodeTypeAccessor(Node()), ["compound"]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_CompoundType]), InFilter(KindAccessor(Node()), ["page"]), InFilter(NameAccessor(Node()), [name]), ) else: # Assume kind == 'namespace' filter_ = AndFilter( - InFilter(NodeTypeAccessor(Node()), ["compound"]), + InFilter(NodeTypeAccessor(Node()), [parser.Node_CompoundType]), InFilter(KindAccessor(Node()), ["namespace"]), InFilter(NameAccessor(Node()), [name]), ) diff --git 
a/breathe/renderer/mask.py b/breathe/renderer/mask.py index 3bb158f0..5e968140 100644 --- a/breathe/renderer/mask.py +++ b/breathe/renderer/mask.py @@ -39,20 +39,9 @@ def __init__(self, lookup): self.lookup = lookup def mask(self, data_object): - try: - node_type = data_object.node_type - except AttributeError as e: - # Horrible hack to silence errors on filtering unicode objects - # until we fix the parsing - if isinstance(data_object, str): - node_type = "unicode" - else: - raise e - - if node_type in self.lookup: - Mask = self.lookup[node_type] - return Mask(data_object) - return data_object + m = self.lookup.get(type(data_object)) + if m is None: return data_object + return m(data_object) class NullMaskFactory(MaskFactoryBase): diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index d0956ce7..047a106b 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -3,7 +3,7 @@ import os import sphinx -from breathe import parser +from breathe import parser, filetypes from sphinx import addnodes from sphinx.domains import cpp, c, python @@ -15,20 +15,25 @@ from docutils.statemachine import StringList, UnexpectedIndentationError from docutils.parsers.rst.states import Text +import re +import textwrap +from typing import Any, Callable, cast, ClassVar, Generic, Optional, Protocol, Type, TypeVar, TYPE_CHECKING, Union +from collections.abc import Iterable, Sequence + +php: Any try: from sphinxcontrib import phpdomain as php # type: ignore except ImportError: - php = None + php = None # type: ignore +cs: Any try: from sphinx_csharp import csharp as cs # type: ignore except ImportError: cs = None -import re -import textwrap -from typing import Any, Callable, Optional, Type, TYPE_CHECKING, Union -from collections.abc import Iterable + +T = TypeVar('T') if TYPE_CHECKING: from breathe.project import ProjectInfo @@ -38,7 +43,15 @@ from sphinx.application import Sphinx from sphinx.directives import ObjectDescription - from docutils.nodes import Node, TextElement + from docutils.nodes import Node, Element, TextElement + + class HasRefID(Protocol): + @property + def refid(self) -> str: ... + + class HasTemplateParamList(Protocol): + @property + def templateparamlist(self) -> parser.Node_templateparamlistType | None: ... ContentCallback = Callable[[addnodes.desc_content], None] Declarator = Union[addnodes.desc_signature, addnodes.desc_signature_line] @@ -167,7 +180,7 @@ class PyClasslike(BaseObject, python.PyClasslike): # classes from phpdomain. # We use capitalization (and the namespace) to differentiate between the two -if php is not None: +if php is not None or TYPE_CHECKING: class PHPNamespaceLevel(BaseObject, php.PhpNamespacelevel): """Description of a PHP item *in* a namespace (not the space itself).""" @@ -186,7 +199,7 @@ class PHPGlobalLevel(BaseObject, php.PhpGloballevel): # ---------------------------------------------------------------------------- -if cs is not None: +if cs is not None or TYPE_CHECKING: class CSharpCurrentNamespace(BaseObject, cs.CSharpCurrentNamespace): pass @@ -337,7 +350,7 @@ def create(domain: str, args) -> ObjectDescription: cls, name = DomainDirectiveFactory.cs_classes[args[0]] else: domain = "cpp" - cls, name = DomainDirectiveFactory.cpp_classes[args[0]] # type: ignore + cls, name = DomainDirectiveFactory.cpp_classes[args[0]] # Replace the directive name because domain directives don't know how to handle # Breathe's "doxygen" directives. 
assert ":" not in name @@ -350,21 +363,21 @@ class NodeFinder(nodes.SparseNodeVisitor): def __init__(self, document): super().__init__(document) - self.declarator = None - self.content = None + self.declarator: Declarator | None = None + self.content: addnodes.desc_content | None = None - def visit_desc_signature(self, node): + def visit_desc_signature(self, node: addnodes.desc_signature): # Find the last signature node because it contains the actual declarator # rather than "template <...>". In Sphinx 1.4.1 we'll be able to use sphinx_cpp_tagname: # https://github.com/michaeljones/breathe/issues/242 self.declarator = node - def visit_desc_signature_line(self, node): + def visit_desc_signature_line(self, node: addnodes.desc_signature_line): # In sphinx 1.5, there is now a desc_signature_line node within the desc_signature # This should be used instead self.declarator = node - def visit_desc_content(self, node): + def visit_desc_content(self, node: addnodes.desc_content): self.content = node # The SparseNodeVisitor seems to not actually be universally Sparse, # but only for nodes known to Docutils. @@ -484,13 +497,104 @@ def inlinetext(self, match, context, next_state): self.parent += msg return [], next_state, [] - -class SphinxRenderer: +def get_content(node: parser.Node_docParaType): + # Add programlisting nodes to content rather than a separate list, + # because programlisting and content nodes can interleave as shown in + # https://www.stack.nl/~dimitri/doxygen/manual/examples/include/html/example.html. + + return (item + for item in node + if parser.tag_name_value(item)[0] not in {'parameterlist','simplesect','image'}) + +def get_parameterlists(node: parser.Node_docParaType): + return (value + for name,value in map(parser.tag_name_value,node) + if name == 'parameterlist') + +def get_simplesects(node: parser.Node_docParaType): + return (value + for name,value in map(parser.tag_name_value,node) + if name == 'simplesect') + +def get_images(node: parser.Node_docParaType): + return (value + for name,value in map(parser.tag_name_value,node) + if name == 'image') + + +class NodeHandler(Generic[T]): + """Dummy callable that associates a set of nodes to a function. This gets + unwrapped by NodeVisitor and is never actually called.""" + + def __init__(self,handler): + self.handler = handler + self.nodes: set[type[parser.Node]] = set() + + def __call__(self, r: SphinxRenderer, node: T, /) -> list[Node]: + raise TypeError() + +class TaggedNodeHandler(Generic[T]): + """Dummy callable that associates a set of nodes to a function. 
This gets + unwrapped by NodeVisitor and is never actually called.""" + + def __init__(self,handler): + self.handler = handler + self.nodes: set[type[parser.Node]] = set() + + def __call__(self, r: SphinxRenderer, tag: str, node: T, /) -> list[Node]: + raise TypeError() + +def node_handler(node: type[parser.Node] +) -> Callable[[Callable[[SphinxRenderer, T], list[Node]]],Callable[[SphinxRenderer, T], list[Node]]]: + def inner(f: Callable[[SphinxRenderer, T], list[Node]]) -> Callable[[SphinxRenderer, T], list[Node]]: + if not isinstance(f,NodeHandler): + f = NodeHandler(f) + f.nodes.add(node) + return f + return inner + +def tagged_node_handler(node: type[parser.Node] +) -> Callable[[Callable[[SphinxRenderer, str, T], list[Node]]],Callable[[SphinxRenderer, str, T], list[Node]]]: + def inner(f: Callable[[SphinxRenderer, str, T], list[Node]]) -> Callable[[SphinxRenderer, str, T], list[Node]]: + if not isinstance(f,TaggedNodeHandler): + f = TaggedNodeHandler(f) + f.nodes.add(node) + return f + return inner + +class NodeVisitor(type): + """Metaclass that collects all methods marked as @node_handler and + @tagged_node_handler into the dicts 'node_handlers' and + 'tagged_node_handlers' respectively, and assigns them to the class""" + + def __new__(cls, name, bases, members): + handlers = {} + tagged_handlers = {} + + for key, value in members.items(): + if isinstance(value, NodeHandler): + for n in value.nodes: + handlers[n] = value.handler + members[key] = value.handler + elif isinstance(value, TaggedNodeHandler): + for n in value.nodes: + tagged_handlers[n] = value.handler + members[key] = value.handler + + members['node_handlers'] = handlers + members['tagged_node_handlers'] = tagged_handlers + + return type.__new__(cls, name, bases, members) + +class SphinxRenderer(metaclass=NodeVisitor): """ Doxygen node visitor that converts input into Sphinx/RST representation. Each visit method takes a Doxygen node as an argument and returns a list of RST nodes. 
""" + node_handlers: ClassVar[dict[type[parser.Node], Callable[[SphinxRenderer, parser.Node], list[Node]]]] + tagged_node_handlers: ClassVar[dict[type[parser.Node], Callable[[SphinxRenderer, str, parser.Node], list[Node]]]] + def __init__( self, app: Sphinx, @@ -584,7 +688,7 @@ def get_filename(node) -> Optional[str]: if isinstance(node, (str,parser.Node_enumvalueType)): node = node_stack[1] filename = get_filename(node) - if not filename and node.node_type == "compound": + if not filename and isinstance(node,parser.Node_CompoundType): file_data = self.compound_parser.parse(node.refid) filename = get_filename(file_data.compounddef) return self.project_info.domain_for_file(filename) if filename else "" @@ -644,6 +748,7 @@ def run_directive( finder = NodeFinder(rst_node.document) rst_node.walk(finder) + assert finder.declarator signode = finder.declarator if self.context.child: @@ -653,16 +758,14 @@ def run_directive( def handle_declaration( self, node, + obj_type: str, declaration: str, *, - obj_type: Optional[str] = None, - content_callback: Optional[ContentCallback] = None, - display_obj_type: Optional[str] = None, - declarator_callback: Optional[DeclaratorCallback] = None, + content_callback: ContentCallback | None = None, + display_obj_type: str | None = None, + declarator_callback: DeclaratorCallback | None = None, options={}, ) -> list[Node]: - if obj_type is None: - obj_type = node.kind if content_callback is None: def content(contentnode): @@ -673,6 +776,7 @@ def content(contentnode): nodes_ = self.run_directive(obj_type, declaration, content_callback, options) assert self.app.env is not None + target = None if self.app.env.config.breathe_debug_trace_doxygen_ids: target = self.create_doxygen_target(node) if len(target) == 0: @@ -722,6 +826,7 @@ def content(contentnode): declarator[0] = addnodes.desc_annotation(txt, txt) if not self.app.env.config.breathe_debug_trace_doxygen_ids: target = self.create_doxygen_target(node) + assert target is not None declarator.insert(0, target) if declarator_callback: declarator_callback(declarator) @@ -747,14 +852,14 @@ def debug_print_node(n): names: list[str] = [] for node in self.qualification_stack[1:]: if config.breathe_debug_trace_qualification: - print("{}{}".format(_debug_indent * " ", debug_print_node(node))) - if node.node_type == "ref" and len(names) == 0: + print("{}{}".format(_debug_indent * " ", debug_print_node(node))) # type: ignore + if isinstance(node,parser.Node_refType) and len(names) == 0: if config.breathe_debug_trace_qualification: print("{}{}".format(_debug_indent * " ", "res=")) return [] if ( - node.node_type == "compound" and node.kind not in ["file", "namespace", "group"] - ) or node.node_type == "memberdef": + isinstance(node,parser.Node_CompoundType) and node.kind not in [parser.CompoundKind.file, parser.CompoundKind.namespace, parser.CompoundKind.group] + ) or isinstance(node,parser.Node_memberdefType): # We skip the 'file' entries because the file name doesn't form part of the # qualified name for the identifier. 
We skip the 'namespace' entries because if we # find an object through the namespace 'compound' entry in the index.xml then we'll @@ -762,7 +867,7 @@ def debug_print_node(n): # need the 'compounddef' entry because if we find the object through the 'file' # entry in the index.xml file then we need to get the namespace name from somewhere names.append(node.name) - if node.node_type == "compounddef" and node.kind == "namespace": + if isinstance(node,parser.Node_compounddefType) and node.kind == parser.DoxCompoundKind.namespace: # Nested namespaces include their parent namespace(s) in compoundname. ie, # compoundname is 'foo::bar' instead of just 'bar' for namespace 'bar' nested in # namespace 'foo'. We need full compoundname because node_stack doesn't necessarily @@ -780,21 +885,21 @@ def debug_print_node(n): # =================================================================================== def get_fully_qualified_name(self): - + assert self.context names = [] node_stack = self.context.node_stack node = node_stack[0] # If the node is a namespace, use its name because namespaces are skipped in the main loop. - if node.node_type == "compound" and node.kind == "namespace": + if isinstance(node,parser.Node_CompoundType) and node.kind == parser.CompoundKind.namespace: names.append(node.name) for node in node_stack: - if node.node_type == "ref" and len(names) == 0: - return node.valueOf_ + if isinstance(node,parser.Node_refType) and len(names) == 0: + return ''.join(node) if ( - node.node_type == "compound" and node.kind not in ["file", "namespace", "group"] - ) or node.node_type == "memberdef": + isinstance(node,parser.Node_CompoundType) and node.kind not in [parser.CompoundKind.file, parser.CompoundKind.namespace, parser.CompoundKind.group] + ) or isinstance(node,parser.Node_memberdefType): # We skip the 'file' entries because the file name doesn't form part of the # qualified name for the identifier. We skip the 'namespace' entries because if we # find an object through the namespace 'compound' entry in the index.xml then we'll @@ -802,7 +907,7 @@ def get_fully_qualified_name(self): # need the 'compounddef' entry because if we find the object through the 'file' # entry in the index.xml file then we need to get the namespace name from somewhere names.insert(0, node.name) - if node.node_type == "compounddef" and node.kind == "namespace": + if isinstance(node,parser.Node_compounddefType) and node.kind == parser.DoxCompoundKind.namespace: # Nested namespaces include their parent namespace(s) in compoundname. ie, # compoundname is 'foo::bar' instead of just 'bar' for namespace 'bar' nested in # namespace 'foo'. 
We need full compoundname because node_stack doesn't necessarily @@ -812,13 +917,15 @@ def get_fully_qualified_name(self): return "::".join(names) - def create_template_prefix(self, decl) -> str: + def create_template_prefix(self, decl: HasTemplateParamList) -> str: if not decl.templateparamlist: return "" nodes = self.render(decl.templateparamlist) return "template<" + "".join(n.astext() for n in nodes) + ">" def run_domain_directive(self, kind, names): + assert self.context + domain_directive = DomainDirectiveFactory.create( self.context.domain, [kind, names] + self.context.directive_args[2:] ) @@ -847,6 +954,7 @@ def run_domain_directive(self, kind, names): finder = NodeFinder(rst_node.document) rst_node.walk(finder) + assert finder.declarator is not None signode = finder.declarator if len(names) > 0 and self.context.child: @@ -916,7 +1024,7 @@ def pullup(node, typ, dest): fieldLists = [fieldList] # collapse retvals into a single return field - if len(fieldLists) != 0 and sphinx.version_info[0:2] < (4, 3): + if len(fieldLists) != 0 and sphinx.version_info[0:2] < (4, 3): # type: ignore others: list[nodes.field] = [] retvals: list[nodes.field] = [] f: nodes.field @@ -935,7 +1043,7 @@ def pullup(node, typ, dest): # we created the retvals before, so we made this prefix assert fn.astext().startswith("returns ") val = nodes.strong("", fn.astext()[8:]) - # assumption from visit_Node_docParamListType: fb is a single paragraph or nothing + # assumption from visit_docparamlist: fb is a single paragraph or nothing assert len(fb) <= 1, fb bodyNodes = [val, nodes.Text(" -- ")] if len(fb) == 1: @@ -970,13 +1078,14 @@ def update_signature(self, signature, obj_type): else: signature.insert(0, annotation) - def render_declaration(self, node, declaration=None, description=None, **kwargs): + def render_declaration(self, node: parser.Node_memberdefType, declaration=None, description=None, **kwargs): if declaration is None: declaration = self.get_fully_qualified_name() obj_type = kwargs.get("objtype", None) if obj_type is None: - obj_type = node.kind + obj_type = node.kind.value nodes = self.run_domain_directive(obj_type, [declaration.replace("\n", " ")]) + target = None if self.app.env.config.breathe_debug_trace_doxygen_ids: target = self.create_doxygen_target(node) if len(target) == 0: @@ -988,6 +1097,8 @@ def render_declaration(self, node, declaration=None, description=None, **kwargs) finder = NodeFinder(rst_node.document) rst_node.walk(finder) + assert finder.declarator is not None + assert finder.content is not None signode = finder.declarator contentnode = finder.content @@ -998,11 +1109,13 @@ def render_declaration(self, node, declaration=None, description=None, **kwargs) description = self.description(node) if not self.app.env.config.breathe_debug_trace_doxygen_ids: target = self.create_doxygen_target(node) + assert target is not None signode.insert(0, target) contentnode.extend(description) return nodes - def visit_Node_DoxygenTypeIndex(self, node: parser.Node_DoxygenTypeIndex) -> list[Node]: + @node_handler(parser.Node_DoxygenTypeIndex) + def visit_doxygen(self, node: parser.Node_DoxygenTypeIndex) -> list[Node]: nodelist: list[Node] = [] # Process all the compound children @@ -1010,14 +1123,16 @@ def visit_Node_DoxygenTypeIndex(self, node: parser.Node_DoxygenTypeIndex) -> lis nodelist.extend(self.render(n)) return nodelist - def visit_Node_DoxygenType(self, node: parser.Node_DoxygenType) -> list[Node]: + @node_handler(parser.Node_DoxygenType) + def visit_doxygendef(self, node: 
parser.Node_DoxygenType) -> list[Node]: assert len(node.compounddef) == 1 return self.render(node.compounddef[0]) - def visit_union(self, node) -> list[Node]: + def visit_union(self, node: HasRefID) -> list[Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) - nodeDef = file_data.compounddef + assert len(file_data.compounddef) == 1 + nodeDef = file_data.compounddef[0] assert self.context is not None parent_context = self.context.create_child_context(file_data) @@ -1040,13 +1155,14 @@ def content(contentnode): rendered_data = self.render(file_data, parent_context) contentnode.extend(rendered_data) - nodes = self.handle_declaration(nodeDef, declaration, content_callback=content) + nodes = self.handle_declaration(nodeDef, nodeDef.kind.value, declaration, content_callback=content) return nodes - def visit_class(self, node) -> list[Node]: + def visit_class(self, node: HasRefID) -> list[Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) - nodeDef = file_data.compounddef + assert len(file_data.compounddef) == 1 + nodeDef = file_data.compounddef[0] assert self.context is not None parent_context = self.context.create_child_context(file_data) @@ -1081,10 +1197,10 @@ def visit_class(self, node) -> list[Node]: else: first = False if base.prot is not None and domain != "cs": - decls.append(base.prot) + decls.append(base.prot.value) if base.virt == "virtual": decls.append("virtual") - decls.append(base.content_[0].value) + decls.append(base[0]) declaration = " ".join(decls) def content(contentnode) -> None: @@ -1096,10 +1212,10 @@ def content(contentnode) -> None: rendered_data = self.render(file_data, parent_context) contentnode.extend(rendered_data) - assert kind in ("class", "struct", "interface") - display_obj_type = "interface" if kind == "interface" else None + assert kind in (parser.DoxCompoundKind.class_, parser.DoxCompoundKind.struct, parser.DoxCompoundKind.interface) + display_obj_type = "interface" if kind == parser.DoxCompoundKind.interface else None nodes = self.handle_declaration( - nodeDef, declaration, content_callback=content, display_obj_type=display_obj_type + nodeDef, nodeDef.kind.value, declaration, content_callback=content, display_obj_type=display_obj_type ) if "members-only" in self.context.directive_args[2]: assert len(nodes) >= 2 @@ -1109,10 +1225,11 @@ def content(contentnode) -> None: return nodes[1][1].children return nodes - def visit_namespace(self, node) -> list[Node]: + def visit_namespace(self, node: HasRefID) -> list[Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) - nodeDef = file_data.compounddef + assert len(file_data.compounddef) == 1 + nodeDef = file_data.compounddef[0] assert self.context is not None parent_context = self.context.create_child_context(file_data) @@ -1139,94 +1256,115 @@ def content(contentnode): display_obj_type = "namespace" if self.get_domain() != "py" else "module" nodes = self.handle_declaration( - nodeDef, declaration, content_callback=content, display_obj_type=display_obj_type + nodeDef, nodeDef.kind.value, declaration, content_callback=content, display_obj_type=display_obj_type ) return nodes - def visit_compound(self, node, render_empty_node=True, **kwargs) -> list[Node]: + def visit_compound( + self, + node: HasRefID, + render_empty_node=True, + *, + get_node_info: Callable[[parser.Node_DoxygenType], tuple[str, parser.CompoundKind]] | None = None, + render_signature: 
Callable[ + [parser.Node_DoxygenType,Sequence[Element],str,parser.CompoundKind], + tuple[list[Node],addnodes.desc_content]] | None = None) -> list[Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) + assert len(file_data.compounddef) == 1 - def get_node_info(file_data): + def def_get_node_info(file_data): + assert isinstance(node,parser.Node_CompoundType) return node.name, node.kind + if get_node_info is None: + get_node_info = def_get_node_info - name, kind = kwargs.get("get_node_info", get_node_info)(file_data) - if kind == "union": + name, kind = get_node_info(file_data) + if kind == parser.CompoundKind.union: dom = self.get_domain() assert not dom or dom in ("c", "cpp") return self.visit_union(node) - elif kind in ("struct", "class", "interface"): + elif kind in (parser.CompoundKind.struct, parser.CompoundKind.class_, parser.CompoundKind.interface): dom = self.get_domain() if not dom or dom in ("c", "cpp", "py", "cs"): return self.visit_class(node) - elif kind == "namespace": + elif kind == parser.CompoundKind.namespace: dom = self.get_domain() if not dom or dom in ("c", "cpp", "py", "cs"): return self.visit_namespace(node) assert self.context is not None parent_context = self.context.create_child_context(file_data) - new_context = parent_context.create_child_context(file_data.compounddef) + new_context = parent_context.create_child_context(file_data.compounddef[0]) rendered_data = self.render(file_data, parent_context) if not rendered_data and not render_empty_node: return [] - def render_signature(file_data, doxygen_target, name, kind): + def def_render_signature( + file_data: parser.Node_DoxygenType, + doxygen_target, + name, + kind: parser.CompoundKind) -> tuple[list[Node],addnodes.desc_content]: # Defer to domains specific directive. - templatePrefix = self.create_template_prefix(file_data.compounddef) + assert len(file_data.compounddef) == 1 + templatePrefix = self.create_template_prefix(file_data.compounddef[0]) arg = "%s %s" % (templatePrefix, self.get_fully_qualified_name()) # add base classes - if kind in ("class", "struct"): + if kind in (parser.CompoundKind.class_, parser.CompoundKind.struct): bs = [] - for base in file_data.compounddef.basecompoundref: + for base in file_data.compounddef[0].basecompoundref: b = [] if base.prot is not None: b.append(base.prot) - if base.virt == "virtual": + if base.virt == parser.DoxVirtualKind.virtual: b.append("virtual") - b.append(base.content_[0].value) + b.append(base[0]) bs.append(" ".join(b)) if len(bs) != 0: arg += " : " arg += ", ".join(bs) + assert self.context is not None self.context.directive_args[1] = [arg] - nodes = self.run_domain_directive(kind, self.context.directive_args[1]) + nodes = self.run_domain_directive(kind.value, self.context.directive_args[1]) rst_node = nodes[1] finder = NodeFinder(rst_node.document) rst_node.walk(finder) + assert finder.declarator is not None + assert finder.content is not None - if kind in ("interface", "namespace"): + if kind in (parser.CompoundKind.interface, parser.CompoundKind.namespace): # This is not a real C++ declaration type that Sphinx supports, # so we hax the replacement of it. 
- finder.declarator[0] = addnodes.desc_annotation(kind + " ", kind + " ") + finder.declarator[0] = addnodes.desc_annotation(kind.value + " ", kind.value + " ") rst_node.children[0].insert(0, doxygen_target) return nodes, finder.content + if render_signature is None: + render_signature = def_render_signature refid = self.get_refid(node.refid) - render_sig = kwargs.get("render_signature", render_signature) with WithContext(self, new_context): # Pretend that the signature is being rendered in context of the # definition, for proper domain detection - nodes, contentnode = render_sig( + nodes, contentnode = render_signature( file_data, self.target_handler.create_target(refid), name, kind ) - if file_data.compounddef.includes: - for include in file_data.compounddef.includes: + if file_data.compounddef[0].includes: + for include in file_data.compounddef[0].includes: contentnode.extend(self.render(include, new_context.create_child_context(include))) contentnode.extend(rendered_data) return nodes - def visit_file(self, node) -> list[Node]: - def render_signature(file_data, doxygen_target, name, kind): + def visit_file(self, node: parser.Node_CompoundType) -> list[Node]: + def render_signature(file_data, doxygen_target, name, kind) -> tuple[list[Node],addnodes.desc_content]: assert self.context is not None options = self.context.directive_args[2] @@ -1243,14 +1381,14 @@ def render_signature(file_data, doxygen_target, name, kind): title_signode.extend(targets) # Set up the title - title_signode.append(nodes.emphasis(text=kind)) + title_signode.append(nodes.emphasis(text=kind.value)) title_signode.append(nodes.Text(" ")) title_signode.append(addnodes.desc_name(text=name)) rst_node.append(title_signode) rst_node.document = self.state.document - rst_node["objtype"] = kind + rst_node["objtype"] = kind.value rst_node["domain"] = self.get_domain() if self.get_domain() else "cpp" contentnode = addnodes.desc_content() @@ -1306,13 +1444,14 @@ def render_signature(file_data, doxygen_target, name, kind): (parser.DoxSectionKind.var, "Variables"), ] - def render_iterable(self, iterable: Iterable) -> list[Node]: + def render_iterable(self, iterable: Iterable[parser.Node]) -> list[Node]: output: list[Node] = [] for entry in iterable: output.extend(self.render(entry)) return output - def visit_Node_compounddefType(self, node: parser.Node_compounddefType) -> list[Node]: + @node_handler(parser.Node_compounddefType) + def visit_compounddef(self, node: parser.Node_compounddefType) -> list[Node]: assert self.context is not None options = self.context.directive_args[2] section_order = None @@ -1323,7 +1462,7 @@ def visit_Node_compounddefType(self, node: parser.Node_compounddefType) -> list[ membergroup_order = {sec: i for i, sec in enumerate(options["membergroups"].split(" "))} nodemap: dict[int, list[Node]] = {} - def addnode(kind, lam): + def addnode(kind: str, lam): if section_order is None: nodemap[len(nodemap)] = lam() elif kind in section_order: @@ -1365,7 +1504,7 @@ def render_derivedcompoundref(node): # Get all sub sections for sectiondef in node.sectiondef: kind = sectiondef.kind - if section_order is not None and kind not in section_order: + if section_order is not None and kind.value not in section_order: continue header = sectiondef.header if membergroup_order is not None and header not in membergroup_order: @@ -1376,7 +1515,7 @@ def render_derivedcompoundref(node): continue rst_node = nodes.container(classes=["breathe-sectiondef"]) rst_node.document = self.state.document - rst_node["objtype"] = kind + 
rst_node["objtype"] = kind.value rst_node.extend(child_nodes) # We store the nodes as a list against the kind in a dictionary as the kind can be # 'user-edited' and that can repeat so this allows us to collect all the 'user-edited' @@ -1385,7 +1524,7 @@ def render_derivedcompoundref(node): # Order the results in an appropriate manner for kind, _ in self.sections: - addnode(kind, lambda: section_nodelists.get(kind.value, [])) + addnode(kind.value, lambda: section_nodelists.get(kind.value, [])) # Take care of innerclasses addnode("innerclass", lambda: self.render_iterable(node.innerclass)) @@ -1396,7 +1535,7 @@ def render_derivedcompoundref(node): file_data = self.compound_parser.parse(cnode.refid) assert len(file_data.compounddef) == 1 inner = file_data.compounddef[0] - addnode("innergroup", lambda: self.visit_Node_compounddefType(inner)) + addnode("innergroup", lambda: self.visit_compounddef(inner)) nodelist = [] for _, nodes_ in sorted(nodemap.items()): @@ -1406,7 +1545,8 @@ def render_derivedcompoundref(node): section_titles = dict(sections) - def visit_Node_sectiondefType(self, node: parser.Node_sectiondefType) -> list[Node]: + @node_handler(parser.Node_sectiondefType) + def visit_sectiondef(self, node: parser.Node_sectiondefType) -> list[Node]: assert self.context is not None options = self.context.directive_args[2] node_list = [] @@ -1429,7 +1569,7 @@ def visit_Node_sectiondefType(self, node: parser.Node_sectiondefType) -> list[No # Group" if the user didn't name the section # This is different to Doxygen which will track the groups and name # them Group1, Group2, Group3, etc. - if node.kind == "user-defined": + if node.kind == parser.DoxSectionKind.user_defined: if node.header: text = node.header else: @@ -1447,14 +1587,20 @@ def visit_Node_sectiondefType(self, node: parser.Node_sectiondefType) -> list[No return res + node_list return [] - def visit_Node_docRefTextType(self, node: parser.Node_docRefTextType|parser.Node_incType) -> list[Node]: + @node_handler(parser.Node_docRefTextType) + def visit_docreftext(self, node: parser.Node_docRefTextType | parser.Node_incType) -> list[Node]: nodelist = self.render_iterable(node) - if hasattr(node, "para"): - nodelist.extend(self.render_iterable(node.para)) + + # TODO: "para" in compound.xsd is an empty tag; figure out what this is + # supposed to do + if isinstance(node,parser.Node_docRefTextType): + for name,value in map(parser.tag_name_value,node): + if name == 'para': + nodelist.extend(self.render(value)) refid = self.get_refid(node.refid) - nodelist = [ + nodelist: list[Node] = [ addnodes.pending_xref( "", reftype="ref", @@ -1467,7 +1613,8 @@ def visit_Node_docRefTextType(self, node: parser.Node_docRefTextType|parser.Node ] return nodelist - def visit_Node_docHeadingTyp(self, node: parser.Node_docHeadingType) -> list[Node]: + @node_handler(parser.Node_docHeadingType) + def visit_docheading(self, node: parser.Node_docHeadingType) -> list[Node]: """Heading renderer. Renders embedded headlines as emphasized text. Different heading levels @@ -1476,7 +1623,8 @@ def visit_Node_docHeadingTyp(self, node: parser.Node_docHeadingType) -> list[Nod nodelist = self.render_iterable(node) return [nodes.emphasis("", "", *nodelist)] - def visit_Node_docParaType(self, node: parser.Node_docParaType) -> list[Node]: + @node_handler(parser.Node_docParaType) + def visit_docpara(self, node: parser.Node_docParaType) -> list[Node]: """ tags in the Doxygen output tend to contain either text or a single other tag of interest. 
So whilst it looks like we're combined descriptions and program listings and @@ -1487,9 +1635,11 @@ def visit_Node_docParaType(self, node: parser.Node_docParaType) -> list[Node]: nodelist = [] if self.context and self.context.directive_args[0] == "doxygenpage": - nodelist.extend(self.render_iterable(node.ordered_children)) + nodelist.extend(self.render_iterable(node)) else: - contentNodeCands = self.render_iterable(node.content) + contentNodeCands = [] + for item in get_content(node): + contentNodeCands.extend(self.render_tagged(item)) # if there are consecutive nodes.Text we should collapse them # and rerender them to ensure the right paragraphifaction contentNodes: list[Node] = [] @@ -1501,12 +1651,12 @@ def visit_Node_docParaType(self, node: parser.Node_docParaType) -> list[Node]: continue # we have handled this node contentNodes.append(n) nodelist.extend(contentNodes) - nodelist.extend(self.render_iterable(node.images)) + nodelist.extend(self.render_iterable(get_images(node))) - paramList = self.render_iterable(node.parameterlist) + paramList = self.render_iterable(get_parameterlists(node)) defs = [] fields = [] - for n in self.render_iterable(node.simplesects): + for n in self.render_iterable(get_simplesects(node)): if isinstance(n, nodes.definition_list_item): defs.append(n) elif isinstance(n, nodes.field_list): @@ -1540,9 +1690,10 @@ def visit_Node_docParaType(self, node: parser.Node_docParaType) -> list[Node]: return [nodes.paragraph("", "", *nodelist)] - visit_Node_docParBlockType = render_iterable + visit_docparblock = node_handler(parser.Node_docParBlockType)(render_iterable) - def visit_Node_docBlockQuoteType(self, node: parser.Node_docBlockQuoteType) -> list[Node]: + @node_handler(parser.Node_docBlockQuoteType) + def visit_docblockquote(self, node: parser.Node_docBlockQuoteType) -> list[Node]: nodelist = self.render_iterable(node) # catch block quote attributions here; the tag is the only identifier, # and it is nested within a subsequent tag @@ -1553,7 +1704,8 @@ def visit_Node_docBlockQuoteType(self, node: parser.Node_docBlockQuoteType) -> l nodelist[-1] = nodes.attribution("", text) return [nodes.block_quote("", classes=[], *nodelist)] - def visit_Node_docImageType(self, node: parser.Node_docImageType) -> list[Node]: + @node_handler(parser.Node_docImageType) + def visit_docimage(self, node: parser.Node_docImageType) -> list[Node]: """Output docutils image node using name attribute from xml as the uri""" path_to_image = node.name @@ -1565,32 +1717,37 @@ def visit_Node_docImageType(self, node: parser.Node_docImageType) -> list[Node]: options = {"uri": path_to_image} return [nodes.image("", **options)] - def visit_Node_docURLLink(self, node: parser.Node_docURLLink) -> list[Node]: + @node_handler(parser.Node_docURLLink) + def visit_docurllink(self, node: parser.Node_docURLLink) -> list[Node]: """Url Link Renderer""" nodelist = self.render_iterable(node) return [nodes.reference("", "", refuri=node.url, *nodelist)] - def visit_Node_docMarkupType(self, node: parser.Node_docMarkupType) -> list[Node]: + @tagged_node_handler(parser.Node_docMarkupType) + def visit_docmarkup(self, tag: str, node: parser.Node_docMarkupType) -> list[Node]: nodelist = self.render_iterable(node) creator: Type[TextElement] = nodes.inline - if node.type_ == "emphasis": + if tag == "emphasis": creator = nodes.emphasis - elif node.type_ == "computeroutput": + elif tag == "computeroutput": creator = nodes.literal - elif node.type_ == "bold": + elif tag == "bold": creator = nodes.strong - elif node.type_ == 
"superscript": + elif tag == "superscript": creator = nodes.superscript - elif node.type_ == "subscript": + elif tag == "subscript": creator = nodes.subscript - elif node.type_ == "center": + elif tag == "center": print("Warning: does not currently handle 'center' text display") - elif node.type_ == "small": + elif tag == "small": print("Warning: does not currently handle 'small' text display") return [creator("", "", *nodelist)] - def visit_Node_docSect1Type(self, node: parser.Node_docSect1Type | parser.Node_docSect2Type | parser.Node_docSect3Type) -> list[Node]: + @node_handler(parser.Node_docSect1Type) + @node_handler(parser.Node_docSect2Type) + @node_handler(parser.Node_docSect3Type) + def visit_docsectN(self, node: parser.Node_docSect1Type | parser.Node_docSect2Type | parser.Node_docSect3Type) -> list[Node]: """ Docutils titles are defined by their level inside the document so the proper structure is only guaranteed by the Doxygen XML. @@ -1604,42 +1761,40 @@ def visit_Node_docSect1Type(self, node: parser.Node_docSect1Type | parser.Node_d section += self.create_doxygen_target(node) section += self.render_iterable(node) return [section] - - visit_Node_docSect2Type = visit_Node_docSect1Type - visit_Node_docSect3Type = visit_Node_docSect1Type - def visit_Node_docSimpleSectType(self, node: parser.Node_docSimpleSectType) -> list[Node]: + @node_handler(parser.Node_docSimpleSectType) + def visit_docsimplesect(self, node: parser.Node_docSimpleSectType) -> list[Node]: """Other Type documentation such as Warning, Note, Returns, etc""" # for those that should go into a field list, just render them as that, # and it will be pulled up later nodelist = self.render_iterable(node.para) - if node.kind in ("pre", "post", "return"): + if node.type in (parser.DoxSimpleSectKind.pre, parser.DoxSimpleSectKind.post, parser.DoxSimpleSectKind.return_): return [ nodes.field_list( "", nodes.field( "", - nodes.field_name("", nodes.Text(node.kind)), + nodes.field_name("", nodes.Text(node.type.value)), nodes.field_body("", *nodelist), ), ) ] - elif node.kind == "warning": + elif node.type == parser.DoxSimpleSectKind.warning: return [nodes.warning("", *nodelist)] - elif node.kind == "note": + elif node.type == parser.DoxSimpleSectKind.note: return [nodes.note("", *nodelist)] - elif node.kind == "see": + elif node.type == parser.DoxSimpleSectKind.see: return [addnodes.seealso("", *nodelist)] - elif node.kind == "remark": - nodelist.insert(0, nodes.title("", nodes.Text(node.kind.capitalize()))) - return [nodes.admonition("", classes=[node.kind], *nodelist)] + elif node.type == parser.DoxSimpleSectKind.remark: + nodelist.insert(0, nodes.title("", nodes.Text(node.type.value.capitalize()))) + return [nodes.admonition("", classes=[node.type.value], *nodelist)] - if node.kind == "par": + if node.type == parser.DoxSimpleSectKind.par: text = self.render(node.title) else: - text = [nodes.Text(node.kind.capitalize())] + text = [nodes.Text(node.type.value.capitalize())] # TODO: is this working as intended? 
there is something strange with the types title = nodes.strong("", "", *text) @@ -1648,12 +1803,12 @@ def visit_Node_docSimpleSectType(self, node: parser.Node_docSimpleSectType) -> l return [nodes.definition_list_item("", term, definition)] - visit_Node_docTitleType = render_iterable + visit_doctitle = node_handler(parser.Node_docTitleType)(render_iterable) - def visit_Node_docFormulaType(self, node: parser.Node_docFormulaType) -> list[Node]: + @node_handler(parser.Node_docFormulaType) + def visit_docformula(self, node: parser.Node_docFormulaType) -> list[Node]: nodelist: list[Node] = [] - for item in node: - latex = item.getValue() + for latex in node: docname = self.state.document.settings.env.docname # Strip out the doxygen markup that slips through # Either inline @@ -1674,7 +1829,8 @@ def visit_Node_docFormulaType(self, node: parser.Node_docFormulaType) -> list[No ) return nodelist - def visit_Node_listingType(self, node: parser.Node_listingType) -> list[Node]: + @node_handler(parser.Node_listingType) + def visit_listing(self, node: parser.Node_listingType) -> list[Node]: nodelist: list[Node] = [] for i, item in enumerate(node.codeline): # Put new lines between the lines @@ -1685,13 +1841,13 @@ def visit_Node_listingType(self, node: parser.Node_listingType) -> list[Node]: # Add blank string at the start otherwise for some reason it renders # the pending_xref tags around the kind in plain text block = nodes.literal_block("", "", *nodelist) - if node.domain: - block["language"] = node.domain + domain = filetypes.get_pygments_alias(node.filename) or filetypes.get_extension(node.filename) + if domain: + block["language"] = domain return [block] - visit_Node_codelineType = render_iterable - - visit_Node_highlightType = render_iterable + visit_codeline = node_handler(parser.Node_codelineType)(render_iterable) + visit_highlight = node_handler(parser.Node_highlightType)(render_iterable) def _nested_inline_parse_with_titles(self, content, node) -> str: """ @@ -1780,13 +1936,14 @@ def visit_verbatim(self, node) -> list[Node]: return [rst_node] - def visit_Node_incType(self, node: parser.Node_incType) -> list[Node]: + @node_handler(parser.Node_incType) + def visit_inc(self, node: parser.Node_incType) -> list[Node]: if not self.app.config.breathe_show_include: return [] compound_link: list[Node] = [nodes.Text(''.join(node))] if node.refid: - compound_link = self.visit_Node_docRefTextType(node) + compound_link = self.visit_docreftext(node) if node.local == "yes": text = [nodes.Text('#include "'), *compound_link, nodes.Text('"')] else: @@ -1794,15 +1951,18 @@ def visit_Node_incType(self, node: parser.Node_incType) -> list[Node]: return [nodes.container("", nodes.emphasis("", "", *text))] - def visit_Node_refType(self, node: parser.Node_refType) -> list[Node]: + @node_handler(parser.Node_refType) + def visit_ref(self, node: parser.Node_refType) -> list[Node]: def get_node_info(file_data): name = ''.join(node) name = name.rsplit("::", 1)[-1] - return name, file_data.compounddef.kind + assert len(file_data.compounddef) == 1 + return name, file_data.compounddef[0].kind return self.visit_compound(node, False, get_node_info=get_node_info) - def visit_Node_docListItemType(self, node: parser.Node_docListItemType) -> list[Node]: + @node_handler(parser.Node_docListItemType) + def visit_doclistitem(self, node: parser.Node_docListItemType) -> list[Node]: """List item renderer. Render all the children depth-first. Upon return expand the children node list into a docutils list-item. 
""" @@ -1823,25 +1983,27 @@ def render_enumerated(self, children, nesting_level) -> list[Node]: nodelist_list["suffix"] = "." return [nodelist_list] - def visit_Node_docListType(self, node: parser.Node_docListType) -> list[Node]: + @tagged_node_handler(parser.Node_docListType) + def visit_doclist(self, tag: str, node: parser.Node_docListType) -> list[Node]: """List renderer The specifics of the actual list rendering are handled by the decorator around the generic render function. Render all the children depth-first.""" """ Call the wrapped render function. Update the nesting level for the enumerated lists. """ - if node.node_subtype == "itemized": - val = self.render_iterable(node.listitem) + if tag == "itemizedlist": + val = self.render_iterable(node) return self.render_unordered(children=val) - elif node.node_subtype == "ordered": + elif tag == "orderedlist": self.nesting_level += 1 - val = self.render_iterable(node.listitem) + val = self.render_iterable(node) self.nesting_level -= 1 return self.render_enumerated(children=val, nesting_level=self.nesting_level) return [] - def visit_Node_compoundRefType(self, node: parser.Node_compoundRefType) -> list[Node]: - nodelist = self.render_iterable(node) + @node_handler(parser.Node_compoundRefType) + def visit_compoundref(self, node: parser.Node_compoundRefType) -> list[Node]: + nodelist: list[Node] = self.render_iterable(node) refid = None if node.refid is not None: refid = self.get_refid(node.refid) @@ -1859,7 +2021,8 @@ def visit_Node_compoundRefType(self, node: parser.Node_compoundRefType) -> list[ ] return nodelist - def visit_Node_docXRefSectType(self, node: parser.Node_docXRefSectType) -> list[Node]: + @node_handler(parser.Node_docXRefSectType) + def visit_docxrefsect(self, node: parser.Node_docXRefSectType) -> list[Node]: assert self.app.env is not None signode = addnodes.desc_signature() @@ -1888,7 +2051,8 @@ def visit_Node_docXRefSectType(self, node: parser.Node_docXRefSectType) -> list[ return [descnode] - def visit_Node_docVariableListType(self, node: parser.Node_docVariableListType) -> list[Node]: + @node_handler(parser.Node_docVariableListType) + def visit_docvariablelist(self, node: parser.Node_docVariableListType) -> list[Node]: output: list[Node] = [] for n in node: descnode = addnodes.desc() @@ -1898,18 +2062,21 @@ def visit_Node_docVariableListType(self, node: parser.Node_docVariableListType) signode += self.render_optional(n.varlistentry) descnode += signode contentnode = addnodes.desc_content() - contentnode += self.render_iterable(n.listitem.para) + contentnode += self.render_iterable(n.listitem) descnode += contentnode output.append(descnode) return output - def visit_Node_docVarListEntryType(self, node: parser.Node_docVarListEntryType) -> list[Node]: + @node_handler(parser.Node_docVarListEntryType) + def visit_docvarlistentry(self, node: parser.Node_docVarListEntryType) -> list[Node]: return self.render_iterable(node.term) - def visit_Node_docAnchorType(self, node: parser.Node_docAnchorType) -> list[Node]: + @node_handler(parser.Node_docAnchorType) + def visit_docanchor(self, node: parser.Node_docAnchorType) -> list[Node]: return list(self.create_doxygen_target(node)) - def visit_Node_docEntryType(self, node: parser.Node_docEntryType) -> list[Node]: + @node_handler(parser.Node_docEntryType) + def visit_docentry(self, node: parser.Node_docEntryType) -> list[Node]: col = nodes.entry() col += self.render_iterable(node.para) if node.thead == "yes": @@ -1920,7 +2087,8 @@ def visit_Node_docEntryType(self, node: 
parser.Node_docEntryType) -> list[Node]: col["morecols"] = int(node.colspan) - 1 return [col] - def visit_Node_docRowType(self, node: parser.Node_docRowType) -> list[Node]: + @node_handler(parser.Node_docRowType) + def visit_docrow(self, node: parser.Node_docRowType) -> list[Node]: row = nodes.row() cols = self.render_iterable(node.entry) elem: Union[nodes.thead, nodes.tbody] @@ -1932,7 +2100,8 @@ def visit_Node_docRowType(self, node: parser.Node_docRowType) -> list[Node]: elem.append(row) return [elem] - def visit_Node_docTableType(self, node: parser.Node_docTableType) -> list[Node]: + @node_handler(parser.Node_docTableType) + def visit_doctable(self, node: parser.Node_docTableType) -> list[Node]: table = nodes.table() table["classes"] += ["colwidths-auto"] tgroup = nodes.tgroup(cols=node.cols) @@ -1943,7 +2112,7 @@ def visit_Node_docTableType(self, node: parser.Node_docTableType) -> list[Node]: table += tgroup rows = self.render_iterable(node.row) - # this code depends on visit_Node_docRowType(), and expects the same elements used to + # this code depends on visit_docrow(), and expects the same elements used to # "envelop" rows there, namely thead and tbody (eg it will need to be updated # if Doxygen one day adds support for tfoot) @@ -1963,45 +2132,42 @@ def merge_row_types(root, elem, elems): return [table] - def visit_mixedcontainer(self, node: compoundsuper.MixedContainer) -> list[Node]: - return self.render_optional(node.getValue()) + visit_description = node_handler(parser.Node_descriptionType)(render_iterable) - visit_Node_descriptionType = render_iterable + visit_linkedtext = node_handler(parser.Node_linkedTextType)(render_iterable) - visit_Node_linkedTextType = render_iterable - - def visit_function(self, node) -> list[Node]: + def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: dom = self.get_domain() if not dom or dom in ("c", "cpp", "py", "cs"): names = self.get_qualification() - names.append(node.get_name()) + names.append(node.name) name = self.join_nested_name(names) if dom == "py": - declaration = name + node.get_argsstring() + declaration = name + (node.argsstring or '') elif dom == "cs": declaration = " ".join( [ self.create_template_prefix(node), - "".join(n.astext() for n in self.render(node.get_type())), + "".join(cast(str,n.astext()) for n in self.render(node.type)), name, - node.get_argsstring(), + node.argsstring or '', ] ) else: elements = [self.create_template_prefix(node)] - if node.static == "yes": + if node.static: elements.append("static") - if node.inline == "yes": + if node.inline: elements.append("inline") if node.kind == "friend": elements.append("friend") - if node.virt in ("virtual", "pure-virtual"): + if node.virt in (parser.DoxVirtualKind.virtual, parser.DoxVirtualKind.pure_virtual): elements.append("virtual") - if node.explicit == "yes": + if node.explicit: elements.append("explicit") # TODO: handle constexpr when parser has been updated # but Doxygen seems to leave it in the type anyway - typ = "".join(n.astext() for n in self.render(node.get_type())) + typ = "".join(n.astext() for n in self.render(node.type)) # Doxygen sometimes leaves 'static' in the type, # e.g., for "constexpr static auto f()" typ = typ.replace("static ", "") @@ -2012,9 +2178,9 @@ def visit_function(self, node) -> list[Node]: typ = typ[7:] elements.append(typ) elements.append(name) - elements.append(node.get_argsstring()) + elements.append(node.argsstring or '') declaration = " ".join(elements) - nodes = self.handle_declaration(node, declaration) + nodes = 
self.handle_declaration(node, node.kind.value, declaration) return nodes else: # Get full function signature for the domain directive. @@ -2030,21 +2196,21 @@ def visit_function(self, node) -> list[Node]: ) # Add CV-qualifiers. - if node.const == "yes": + if node.const: signature += " const" # The doxygen xml output doesn't register 'volatile' as the xml attribute for functions # until version 1.8.8 so we also check argsstring: # https://bugzilla.gnome.org/show_bug.cgi?id=733451 - if node.volatile == "yes" or node.argsstring.endswith("volatile"): + if node.volatile or (node.argsstring and node.argsstring.endswith("volatile")): signature += " volatile" - if node.refqual == "lvalue": + if node.refqual == parser.DoxRefQualifierKind.lvalue: signature += "&" - elif node.refqual == "rvalue": + elif node.refqual == parser.DoxRefQualifierKind.rvalue: signature += "&&" # Add `= 0` for pure virtual members. - if node.virt == "pure-virtual": + if node.virt == parser.DoxVirtualKind.pure_virtual: signature += "= 0" assert self.context is not None @@ -2053,6 +2219,7 @@ def visit_function(self, node) -> list[Node]: nodes = self.run_domain_directive(node.kind, self.context.directive_args[1]) assert self.app.env is not None + target = None if self.app.env.config.breathe_debug_trace_doxygen_ids: target = self.create_doxygen_target(node) if len(target) == 0: @@ -2065,24 +2232,26 @@ def visit_function(self, node) -> list[Node]: rst_node = nodes[1] finder = NodeFinder(rst_node.document) rst_node.walk(finder) + assert finder.content is not None # Templates have multiple signature nodes in recent versions of Sphinx. # Insert Doxygen target into the first signature node. if not self.app.env.config.breathe_debug_trace_doxygen_ids: target = self.create_doxygen_target(node) + assert target is not None rst_node.children[0].insert(0, target) finder.content.extend(self.description(node)) return nodes - def visit_define(self, node) -> list[Node]: + def visit_define(self, node: parser.Node_memberdefType) -> list[Node]: declaration = node.name if node.param: declaration += "(" for i, parameter in enumerate(node.param): if i: declaration += ", " - declaration += parameter.defname + if parameter.defname: declaration += parameter.defname declaration += ")" # TODO: remove this once Sphinx supports definitions for macros @@ -2091,9 +2260,9 @@ def add_definition(declarator: Declarator) -> None: declarator.append(nodes.Text(" ")) declarator.extend(self.render(node.initializer)) - return self.handle_declaration(node, declaration, declarator_callback=add_definition) + return self.handle_declaration(node, node.kind.value, declaration, declarator_callback=add_definition) - def visit_enum(self, node) -> list[Node]: + def visit_enum(self, node: parser.Node_memberdefType) -> list[Node]: def content(contentnode): contentnode.extend(self.description(node)) values = nodes.emphasis("", nodes.Text("Values:")) @@ -2106,34 +2275,35 @@ def content(contentnode): names.append(node.name) declaration = self.join_nested_name(names) dom = self.get_domain() - if (not dom or dom == "cpp") and node.strong == "yes": + if (not dom or dom == "cpp") and node.strong: # It looks like Doxygen does not make a difference # between 'enum class' and 'enum struct', # so render them both as 'enum class'. 
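            # Illustrative sketch only (assumes typical Doxygen XML for a scoped
            # enum; the example declaration is invented): a C++ declaration such as
            #     enum class Color : std::uint8_t { red, green };
            # reaches this branch with node.strong set, so the object type below
            # becomes "enum-class" and the rendered underlying type extends the
            # declaration to roughly "Color : std::uint8_t".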
obj_type = "enum-class" - underlying_type = "".join(n.astext() for n in self.render(node.type_)) + underlying_type = "".join(n.astext() for n in self.render(node.type)) if len(underlying_type.strip()) != 0: declaration += " : " declaration += underlying_type else: obj_type = "enum" return self.handle_declaration( - node, declaration, obj_type=obj_type, content_callback=content + node, obj_type, declaration, content_callback=content ) - def visit_Node_enumvalueType(self, node: parser.Node_enumvalueType) -> list[Node]: + @node_handler(parser.Node_enumvalueType) + def visit_enumvalue(self, node: parser.Node_enumvalueType) -> list[Node]: if self.app.config.breathe_show_enumvalue_initializer: declaration = node.name + self.make_initializer(node) else: declaration = node.name - return self.handle_declaration(node, declaration, obj_type="enumvalue") + return self.handle_declaration(node, "enumvalue", declaration) - def visit_typedef(self, node) -> list[Node]: - type_ = "".join(n.astext() for n in self.render(node.get_type())) + def visit_typedef(self, node: parser.Node_memberdefType) -> list[Node]: + type_ = "".join(n.astext() for n in self.render(node.type)) names = self.get_qualification() - names.append(node.get_name()) + names.append(node.name) name = self.join_nested_name(names) - if node.definition.startswith("using "): + if node.definition and node.definition.startswith("using "): # TODO: looks like Doxygen does not generate the proper XML # for the template parameter list declaration = self.create_template_prefix(node) @@ -2144,8 +2314,8 @@ def visit_typedef(self, node) -> list[Node]: # contain the full text. If a @typedef was used instead, the # definition has only the typename, which makes it impossible to # distinguish between them so fallback to "typedef" behavior here. 
- declaration = " ".join([type_, name, node.get_argsstring()]) - return self.handle_declaration(node, declaration) + declaration = " ".join([type_, name, node.argsstring or '']) + return self.handle_declaration(node, node.kind.value, declaration) def make_initializer(self, node) -> str: initializer = node.initializer @@ -2162,7 +2332,7 @@ def make_initializer(self, node) -> str: signature.extend(render_nodes) return "".join(n.astext() for n in signature) - def visit_variable(self, node) -> list[Node]: + def visit_variable(self, node: parser.Node_memberdefType) -> list[Node]: names = self.get_qualification() names.append(node.name) name = self.join_nested_name(names) @@ -2177,26 +2347,26 @@ def visit_variable(self, node) -> list[Node]: declaration = " ".join( [ self.create_template_prefix(node), - "".join(n.astext() for n in self.render(node.get_type())), + "".join(n.astext() for n in self.render(node.type)), name, - node.get_argsstring(), + node.argsstring or '', ] ) - if node.get_gettable() or node.get_settable(): + if node.gettable or node.settable: declaration += "{" - if node.get_gettable(): + if node.gettable: declaration += "get;" - if node.get_settable(): + if node.settable: declaration += "set;" declaration += "}" declaration += self.make_initializer(node) else: elements = [self.create_template_prefix(node)] - if node.static == "yes": + if node.static: elements.append("static") - if node.mutable == "yes": + if node.mutable: elements.append("mutable") - typename = "".join(n.astext() for n in self.render(node.get_type())) + typename = "".join(n.astext() for n in self.render(node.type)) # Doxygen sometimes leaves 'static' in the type, # e.g., for "constexpr static int i" typename = typename.replace("static ", "") @@ -2204,15 +2374,15 @@ def visit_variable(self, node) -> list[Node]: typename = typename.replace("::", ".") elements.append(typename) elements.append(name) - elements.append(node.get_argsstring()) + elements.append(node.argsstring or '') elements.append(self.make_initializer(node)) declaration = " ".join(elements) if not dom or dom in ("c", "cpp", "py", "cs"): - return self.handle_declaration(node, declaration, options=options) + return self.handle_declaration(node, node.kind.value, declaration, options=options) else: return self.render_declaration(node, declaration) - def visit_friendclass(self, node) -> list[Node]: + def visit_friendclass(self, node: parser.Node_memberdefType) -> list[Node]: dom = self.get_domain() assert not dom or dom == "cpp" @@ -2222,7 +2392,7 @@ def visit_friendclass(self, node) -> list[Node]: signode = addnodes.desc_signature() desc += signode - typ = "".join(n.astext() for n in self.render(node.get_type())) + typ = "".join(n.astext() for n in self.render(node.type)) # in Doxygen < 1.9 the 'friend' part is there, but afterwards not # https://github.com/michaeljones/breathe/issues/616 assert typ in ("friend class", "friend struct", "class", "struct") @@ -2262,7 +2432,7 @@ def visit_templateparam( dom = "cpp" appendDeclName = True if insertDeclNameByParsing: - if dom == "cpp" and sphinx.version_info >= (4, 1, 0): + if dom == "cpp" and sphinx.version_info >= (4, 1, 0): # type: ignore parser = cpp.DefinitionParser( "".join(n.astext() for n in nodelist), location=self.state.state_machine.get_source_and_line(), @@ -2310,7 +2480,8 @@ def visit_templateparam( return nodelist - def visit_Node_templateparamlistType(self, node: parser.Node_templateparamlistType) -> list[Node]: + @node_handler(parser.Node_templateparamlistType) + def 
visit_templateparamlist(self, node: parser.Node_templateparamlistType) -> list[Node]: nodelist: list[Node] = [] self.output_defname = False for i, item in enumerate(node.param): @@ -2320,20 +2491,22 @@ def visit_Node_templateparamlistType(self, node: parser.Node_templateparamlistTy self.output_defname = True return nodelist - def visit_Node_docParamListType(self, node: parser.Node_docParamListType) -> list[Node]: + @node_handler(parser.Node_docParamListType) + def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: """Parameter/Exception/TemplateParameter documentation""" + has_retval = sphinx.version_info[0:2] < (4, 3) # type: ignore fieldListName = { - "param": "param", - "exception": "throws", - "templateparam": "tparam", + parser.DoxParamListKind.param: "param", + parser.DoxParamListKind.exception: "throws", + parser.DoxParamListKind.templateparam: "tparam", # retval support available on Sphinx >= 4.3 - "retval": "returns" if sphinx.version_info[0:2] < (4, 3) else "retval", + parser.DoxParamListKind.retval: "returns" if has_retval else "retval", } # https://docutils.sourceforge.io/docs/ref/doctree.html#field-list fieldList = nodes.field_list() - for item in node.parameteritem: + for item in node: # TODO: does item.parameternamelist really have more than 1 parametername? assert len(item.parameternamelist) <= 1, item.parameternamelist nameNodes: list[Node] = [] @@ -2343,14 +2516,13 @@ def visit_Node_docParamListType(self, node: parser.Node_docParamListType) -> lis if len(paramNameNodes) != 0: nameNodes = [] for paramName in paramNameNodes: - content = paramName.content_ # this is really a list of MixedContainer objects, i.e., a generic object # we assume there is either 1 or 2 elements, if there is 2 the first is the # parameter direction - assert len(content) == 1 or len(content) == 2, content - thisName = self.render(content[-1]) + assert len(paramName) == 1 or len(paramName) == 2, list(paramName) + thisName = self.render(paramName[-1]) if len(nameNodes) != 0: - if node.kind == "exception": + if node.kind == parser.DoxParamListKind.exception: msg = "Doxygen \\exception commands with multiple names can not be" msg += " converted to a single :throws: field in Sphinx." 
msg += " Exception '{}' suppresed from output.".format( @@ -2360,10 +2532,10 @@ def visit_Node_docParamListType(self, node: parser.Node_docParamListType) -> lis continue nameNodes.append(nodes.Text(", ")) nameNodes.extend(thisName) - if len(content) == 2: + if len(paramName) == 2: # note, each paramName node seems to have the same direction, # so just use the last one - dir = "".join(n.astext() for n in self.render(content[0])).strip() + dir = "".join(n.astext() for n in self.render(paramName[0])).strip() assert dir in ("[in]", "[out]", "[inout]"), ">" + dir + "<" parameterDirectionNodes = [nodes.strong(dir, dir), nodes.Text(" ", " ")] # it seems that Sphinx expects the name to be a single node, @@ -2443,7 +2615,8 @@ def visit_docdotfile(self, node) -> list[Node]: return [nodes.figure("", graph_node, caption_node)] return [graph_node] - def visit_Node_graphType(self, node: parser.Node_graphType) -> list[Node]: + @node_handler(parser.Node_graphType) + def visit_docgraph(self, node: parser.Node_graphType) -> list[Node]: """Create a graph (generated by doxygen - not user-defined) from XML using dot syntax.""" # use graphs' legend from doxygen (v1.9.1) @@ -2511,33 +2684,35 @@ def visit_unknown(self, node) -> list[Node]: """Visit a node of unknown type.""" return [] - def visit_Node_CompoundType(self, node: parser.Node_CompoundType) -> list[Node]: + @node_handler(parser.Node_CompoundType) + def dispatch_compound(self, node: parser.Node_CompoundType) -> list[Node]: """Dispatch handling of a compound node to a suitable visit method.""" if node.kind in [parser.CompoundKind.file, parser.CompoundKind.dir, parser.CompoundKind.page, parser.CompoundKind.example, parser.CompoundKind.group]: return self.visit_file(node) return self.visit_compound(node) - def dispatch_Node_memberdefType(self, node: parser.Node_memberdefType) -> list[Node]: + @node_handler(parser.Node_memberdefType) + def dispatch_memberdef(self, node: parser.Node_memberdefType) -> list[Node]: """Dispatch handling of a memberdef node to a suitable visit method.""" - if node.kind in ("function", "signal", "slot") or ( - node.kind == "friend" and node.argsstring + if node.kind in (parser.DoxMemberKind.function, parser.DoxMemberKind.signal, parser.DoxMemberKind.slot) or ( + node.kind == parser.DoxMemberKind.friend and node.argsstring ): return self.visit_function(node) - if node.kind == "enum": + if node.kind == parser.DoxMemberKind.enum: return self.visit_enum(node) - if node.kind == "typedef": + if node.kind == parser.DoxMemberKind.typedef: return self.visit_typedef(node) - if node.kind == "variable": + if node.kind == parser.DoxMemberKind.variable: return self.visit_variable(node) - if node.kind == "property": + if node.kind == parser.DoxMemberKind.property: # Note: visit like variable for now return self.visit_variable(node) - if node.kind == "event": + if node.kind == parser.DoxMemberKind.event: # Note: visit like variable for now return self.visit_variable(node) - if node.kind == "define": + if node.kind == parser.DoxMemberKind.define: return self.visit_define(node) - if node.kind == "friend": + if node.kind == parser.DoxMemberKind.friend: # note, friend functions should be dispatched further up return self.visit_friendclass(node) return self.render_declaration(node, update_signature=self.update_signature) @@ -2548,6 +2723,7 @@ def dispatch_Node_memberdefType(self, node: parser.Node_memberdefType) -> list[N # "docdot": visit_docdot, #} + @node_handler(str) def render_string(self, node: str) -> list[Node]: # Skip any nodes that are pure 
whitespace # Probably need a better way to do this as currently we're only doing @@ -2576,20 +2752,28 @@ def render_string(self, node: str) -> list[Node]: if node == " ": return [nodes.Text(node)] return [] + + def render_tagged(self, item: parser.TaggedValue[str,parser.Node] | str) -> list[Node]: + if isinstance(item,str): return self.render_string(item) + h = self.tagged_node_handlers.get(type(item.value)) + if h is not None: + assert self.context is not None + with WithContext(self, self.context.create_child_context(item.value)): + if not self.filter_.allow(self.context.node_stack): return [] + return h(self,item.name,item.value) + return self.render(item.value) - def render(self, node, context: Optional[RenderContext] = None) -> list[Node]: - assert self.context is not None + def render(self, node: parser.Node, context: RenderContext | None = None) -> list[Node]: if context is None: + assert self.context is not None context = self.context.create_child_context(node) with WithContext(self, context): + assert self.context is not None result: list[Node] = [] if not self.filter_.allow(self.context.node_stack): pass - elif isinstance(node, str): - result = self.render_string(node) else: - assert type(node).__name__.startswith('Node_') - method = getattr(SphinxRenderer, 'visit_'+type(node).__name__, SphinxRenderer.visit_unknown) + method = self.node_handlers.get(type(node), SphinxRenderer.visit_unknown) result = method(self, node) return result diff --git a/tests/test_renderer.py b/tests/test_renderer.py index c38317e1..620ecef7 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -74,7 +74,7 @@ def __init__(self, app): env.temp_data["docname"] = "mock-doc" env.temp_data["breathe_project_info_factory"] = ProjectInfoFactory(app) env.temp_data["breathe_parser_factory"] = DoxygenParserFactory(app) - settings = frontend.OptionParser(components=(docutils.parsers.rst.Parser,)).get_default_values() + settings = frontend.get_default_settings(docutils.parsers.rst.Parser) # type: ignore settings.env = env self.document = utils.new_document("", settings) @@ -142,7 +142,7 @@ def __init__(self): pass def create_target(self, refid): - pass + return [] class MockDocument: @@ -161,7 +161,7 @@ def __init__(self, compound_dict): class MockFileData: def __init__(self, compounddef): - self.compounddef = compounddef + self.compounddef = [compounddef] def parse(self, compoundname): compounddef = self.compound_dict[compoundname] @@ -517,8 +517,6 @@ def get_directive(app): def get_matches(datafile): - from xml.dom import minidom - argsstrings = [] with open(os.path.join(os.path.dirname(__file__), "data", datafile)) as fid: xml = fid.read() diff --git a/tests/test_utils.py b/tests/test_utils.py index 5e43b3bd..37228044 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,5 +1,4 @@ from unittest import TestCase -from xml.dom import minidom from breathe.renderer.sphinxrenderer import get_param_decl, get_definition_without_template_args from breathe import path_handler, parser diff --git a/xml_parser_generator/module_template.c b/xml_parser_generator/module_template.c index 0697b896..80da77a0 100644 --- a/xml_parser_generator/module_template.c +++ b/xml_parser_generator/module_template.c @@ -10,6 +10,7 @@ #include #include #include +#include #pragma GCC diagnostic ignored "-Wunused-parameter" @@ -43,6 +44,8 @@ enum { CLASS_FROZEN_LIST = 0, CLASS_FROZEN_LIST_ITR, CLASS_TAGGED_VALUE, + CLASS_PARSE_ERROR, + CLASS_PARSE_WARNING, //% for type in types|select('used_directly') CLASS__{$ type $}, //% 
endfor @@ -83,12 +86,6 @@ enum { static PyModuleDef module_def; typedef struct { - /* the type of the exception thrown for errors in the input */ - PyObject *parse_error_exc_type; - - /* the type of the warning category for ignorable problems in the input */ - PyObject *parse_warn_exc_type; - PyObject *tag_names[TAGGED_UNION_NAME_COUNT]; //% if char_enum_chars @@ -161,22 +158,39 @@ static void pop_callbacks(parse_state *state) { } static void set_parse_error(parse_state *state,const char *msg) { - PyErr_Format( - state->py->parse_error_exc_type, - "Error on line %li: %s", - (long)XML_GetCurrentLineNumber(state->parser), - msg); + PyObject *exc = PyObject_CallFunction( + (PyObject*)state->py->classes[CLASS_PARSE_ERROR], + "sl", + XML_ErrorString(XML_GetErrorCode(state->parser)), + (long)XML_GetCurrentLineNumber(state->parser)); + if(exc == NULL) return; + PyErr_SetObject((PyObject*)state->py->classes[CLASS_PARSE_ERROR],exc); + Py_DECREF(exc); +} +static void set_parse_error_format(parse_state *state,const char *msg,...) { + PyObject *msg_obj; + PyObject *exc; + va_list vargs; + + va_start(vargs,msg); + msg_obj = PyUnicode_FromFormatV(msg,vargs); + va_end(vargs); + if(msg_obj == NULL) return; + + exc = PyObject_CallFunction( + (PyObject*)state->py->classes[CLASS_PARSE_ERROR], + "Ol", + msg_obj, + (long)XML_GetCurrentLineNumber(state->parser)); + Py_DECREF(msg_obj); + if(exc == NULL) return; + PyErr_SetObject((PyObject*)state->py->classes[CLASS_PARSE_ERROR],exc); + Py_DECREF(exc); } -#define SET_PARSE_ERROR_FMT(state,msg,...) \ - PyErr_Format(\ - state->py->parse_error_exc_type,\ - "Error on line %li: " msg,\ - (long)XML_GetCurrentLineNumber(state->parser),\ - __VA_ARGS__) static int set_parse_warning(parse_state *state,const char *msg) { return PyErr_WarnFormat( - state->py->parse_warn_exc_type, + (PyObject*)state->py->classes[CLASS_PARSE_WARNING], 1, "Warning on line %li: %s", (long)XML_GetCurrentLineNumber(state->parser), @@ -184,7 +198,7 @@ static int set_parse_warning(parse_state *state,const char *msg) { } #define SET_PARSE_WARNING_FMT(state,msg,...) 
\ PyErr_WarnFormat(\ - state->py->parse_warn_exc_type,\ + (PyObject*)state->py->classes[CLASS_PARSE_WARNING],\ 1,\ "Warning on line %li: " msg,\ (long)XML_GetCurrentLineNumber(state->parser),\ @@ -326,7 +340,7 @@ static PyMemberDef tagged_value_members[] = { {"value",T_OBJECT_EX,offsetof(tagged_value,values) + sizeof(PyObject*),READONLY,NULL}, {NULL}}; -PyObject *tagged_value_tp_new(PyTypeObject *subtype,PyObject *args,PyObject *kwds) { +static PyObject *tagged_value_tp_new(PyTypeObject *subtype,PyObject *args,PyObject *kwds) { tagged_value *r; if(kwds != NULL && PyDict_Size(kwds)) { PyErr_SetString(PyExc_TypeError,"TaggedValue.__new__ does not take any keyword arguments"); @@ -365,7 +379,7 @@ typedef struct { PyObject **content; } frozen_list; -/* A type doesn't satisfy collections.abc.Iterable unless is has an __iter__ +/* A type doesn't satisfy collections.abc.Iterable unless it has an __iter__ method */ typedef struct { PyObject_HEAD @@ -571,6 +585,54 @@ static PyType_Slot frozen_list_itr_slots[] = { {0,NULL} }; +static PyObject *parse_error_get_args(PyObject *self) { + PyObject *args = PyObject_GetAttrString(self,"args"); + if(args == NULL) return NULL; + if(!PyTuple_Check(args) || PyTuple_Size(args) < 2) { + PyErr_SetString(PyExc_TypeError,"\"self.args\" is supposed to be a tuple with a length of 2"); + Py_DECREF(args); + return NULL; + } + return args; +} + +static PyObject *parse_error_tp_str(PyObject *self) { + PyObject *r; + PyObject *lineno; + PyObject *args = parse_error_get_args(self); + if(args == NULL) return NULL; + lineno = PyTuple_GetItem(args,1); + if(lineno == Py_None) r = PyUnicode_FromFormat("Error: %S",lineno,PyTuple_GetItem(args,0)); + else r = PyUnicode_FromFormat("Error on line %S: %S",lineno,PyTuple_GetItem(args,0)); + Py_DECREF(args); + return r; +} + +static PyObject *parse_error_get(PyObject *self,void *i) { + PyObject *r; + PyObject *args = parse_error_get_args(self); + if(args == NULL) return NULL; + r = PyTuple_GetItem(args,(Py_ssize_t)i); + Py_INCREF(r); + return r; +} + +static PyGetSetDef parse_error_getset[] = { + {"message",parse_error_get,NULL,NULL,(void*)0}, + {"lineno",parse_error_get,NULL,NULL,(void*)1}, + {NULL} +}; + +static PyType_Slot parse_error_slots[] = { + {Py_tp_str,parse_error_tp_str}, + {Py_tp_getset,parse_error_getset}, + {0,NULL} +}; + +static PyType_Slot parse_warning_slots[] = { + {0,NULL} +}; + typedef enum { //% for n in element_names @@ -817,7 +879,7 @@ static PyObject **frozen_list_push_tuple_item(parse_state *state,Py_ssize_t tupl tuple_item *new_tuple; if(fl->size && ((tuple_item*)fl->content[fl->size-1])->fields[tuple_size-1] == NULL) { - SET_PARSE_ERROR_FMT( + set_parse_error_format( state, "\"%s\" element can only come after \"%s\" element or be the first in its group", field_names[0], @@ -834,7 +896,7 @@ static PyObject **frozen_list_push_tuple_item(parse_state *state,Py_ssize_t tupl } if(!fl->size || ((tuple_item*)fl->content[fl->size-1])->fields[tuple_i-1] == NULL) { - SET_PARSE_ERROR_FMT( + set_parse_error_format( state, "\"%s\" element can only come after \"%s\" element", field_names[tuple_i], @@ -851,7 +913,7 @@ static int frozen_list_check_complete_tuple(parse_state *state,Py_ssize_t tuple_ Py_ssize_t i = tuple_size; while(last->fields[i-1] == NULL) --i; if(i != tuple_size) { - SET_PARSE_ERROR_FMT( + set_parse_error_format( state, "\"%s\" element must come after \"%s\" element", field_names[i], @@ -999,22 +1061,22 @@ static int warn_duplicate_attribute(parse_state *state,const char *name) { } static void 
raise_duplicate_element_error(parse_state *state,const char *name) { - SET_PARSE_ERROR_FMT(state,"\"%s\" cannot appear more than once in this context",name); + set_parse_error_format(state,"\"%s\" cannot appear more than once in this context",name); } static void raise_missing_element_error(parse_state *state,const char *name) { - SET_PARSE_ERROR_FMT(state,"missing \"%s\" child",name); + set_parse_error_format(state,"missing \"%s\" child",name); } static void raise_empty_list_element_error(parse_state *state,const char *name) { - SET_PARSE_ERROR_FMT(state,"at least one \"%s\" child is required",name); + set_parse_error_format(state,"at least one \"%s\" child is required",name); } static void raise_invalid_enum_error(parse_state *state,const char *value) { - SET_PARSE_ERROR_FMT(state,"\"%s\" is not one of the allowed enumeration values",value); + set_parse_error_format(state,"\"%s\" is not one of the allowed enumeration values",value); } static void raise_invalid_char_enum_error(parse_state *state,char c,const char *allowed) { - SET_PARSE_ERROR_FMT(state,"\"%c\" is not one of the allowed character values; must be one of \"%s\"",c,allowed); + set_parse_error_format(state,"\"%c\" is not one of the allowed character values; must be one of \"%s\"",c,allowed); } int parse_integer(parse_state *state,const char *str,long *value) { @@ -1053,7 +1115,7 @@ static int set_DoxBool_attribute(parse_state *state,PyObject **field,const XML_C if(strcmp(attr[1],"yes") == 0) *field = Py_True; else if(strcmp(attr[1],"no") == 0) *field = Py_False; else { - SET_PARSE_ERROR_FMT(state,"\"%s\" must be \"yes\" or \"no\"",attr[0]); + set_parse_error_format(state,"\"%s\" must be \"yes\" or \"no\"",attr[0]); return -1; } Py_INCREF(*field); @@ -1282,6 +1344,7 @@ static int node_class_new_fields_end__{$ type $}(module_state *state,PyObject ** if(fields[FIELD__{$ type $}__{$ ref.py_name $}] == NULL) { //% if ref.is_list fields[FIELD__{$ type $}__{$ ref.py_name $}] = (PyObject*)create_frozen_list(state); + if(fields[FIELD__{$ type $}__{$ ref.py_name $}] == NULL) return -1; //% elif ref.min_items == 0 fields[FIELD__{$ type $}__{$ ref.py_name $}] = Py_None; Py_INCREF(Py_None); @@ -1648,6 +1711,8 @@ static spec_and_is_list class_specs[] = { {{FULL_MODULE_STR ".FrozenList",sizeof(frozen_list),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,frozen_list_slots},0}, {{FULL_MODULE_STR ".FrozenListItr",sizeof(frozen_list_itr),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,frozen_list_itr_slots},0}, {{FULL_MODULE_STR ".TaggedValue",sizeof(tagged_value),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,tagged_value_slots},0}, + {{FULL_MODULE_STR ".ParseError",0,0,Py_TPFLAGS_DEFAULT,parse_error_slots},0}, + {{FULL_MODULE_STR ".ParseWarning",0,0,Py_TPFLAGS_DEFAULT,parse_warning_slots},0}, //% for type in types|select('used_directly') {{FULL_MODULE_STR ".Node_{$ type $}",offsetof(node_{$ common_affix(type) $}_common,fields){% if type is has_fields %} + sizeof(PyObject*)*FIELD_COUNT__{$ type $}{% endif %},0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,node_class_slots__{$ type $}},{$ '1' if type is list_e else '0' $}}, //% endfor @@ -1658,11 +1723,14 @@ static spec_and_is_list class_specs[] = { static void raise_expat_error(parse_state *state) { if(!PyErr_Occurred()) { - PyErr_Format( - state->py->parse_error_exc_type, - "Error on line %i: %s", - XML_GetErrorLineNumber(state->parser), - XML_ErrorString(XML_GetErrorCode(state->parser))); + PyObject *exc = PyObject_CallFunction( + 
(PyObject*)state->py->classes[CLASS_PARSE_ERROR], + "sl", + XML_ErrorString(XML_GetErrorCode(state->parser)), + (long)XML_GetErrorLineNumber(state->parser)); + if(exc == NULL) return; + PyErr_SetObject((PyObject*)state->py->classes[CLASS_PARSE_ERROR],exc); + Py_DECREF(exc); } } @@ -1847,7 +1915,14 @@ static PyObject *parse(PyObject *module,expat_source source,PyObject *obj) { } if(r_obj == NULL) { - PyErr_SetString(state.py->parse_error_exc_type,"document without a recognized root element"); + PyObject *exc = PyObject_CallFunction( + (PyObject*)state.py->classes[CLASS_PARSE_ERROR], + "sO", + "document without a recognized root element", + Py_None); + if(exc == NULL) return NULL; + PyErr_SetObject((PyObject*)state.py->classes[CLASS_PARSE_ERROR],exc); + Py_DECREF(exc); } return r_obj; @@ -1953,13 +2028,6 @@ static int module_exec(PyObject *module) { size_t tu_i=0; size_t char_i=0; module_state *state = PyModule_GetState(module); - state->parse_error_exc_type = PyErr_NewException(FULL_MODULE_STR ".ParseError",PyExc_RuntimeError,NULL); - if(PyModule_AddObject(module,"ParseError",state->parse_error_exc_type)) goto error; - Py_INCREF(state->parse_error_exc_type); - - state->parse_warn_exc_type = PyErr_NewException(FULL_MODULE_STR ".ParseWarning",PyExc_UserWarning,NULL); - if(PyModule_AddObject(module,"ParseWarning",state->parse_warn_exc_type)) goto error; - Py_INCREF(state->parse_warn_exc_type); for(; tu_itag_names[tu_i] = PyUnicode_FromString(tagged_union_names[tu_i]); @@ -1996,6 +2064,10 @@ static int module_exec(PyObject *module) { if(frozen_list_bases == NULL) goto error; PyTuple_SetItem(frozen_list_bases,0,(PyObject*)state->classes[i]); Py_INCREF(state->classes[i]); + } else if(i == CLASS_PARSE_ERROR) { + state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,PyExc_RuntimeError); + } else if(i == CLASS_PARSE_WARNING) { + state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,PyExc_UserWarning); } else if(class_specs[i].list_base) { assert(frozen_list_bases != NULL); state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,frozen_list_bases); @@ -2023,9 +2095,7 @@ static int module_exec(PyObject *module) { decref_array((PyObject**)state->classes,i); decref_array(state->tag_names,tu_i); decref_array(state->char_objects,char_i); - Py_XDECREF(state->parse_warn_exc_type); - Py_DECREF(state->parse_error_exc_type); - state->parse_error_exc_type = NULL; + state->classes[0] = NULL; return -1; } @@ -2038,16 +2108,12 @@ static int module_traverse(PyObject *module,visitproc visit,void *arg) { r = visit_array(state->enum_values__{$ type $},ENUM_VALUE_COUNT__{$ type $},visit,arg); if(r) return r; //% endfor - r = visit_array((PyObject**)state->classes,CLASS_COUNT,visit,arg); - if(r) return r; - r = visit(state->parse_warn_exc_type,arg); - if(r) return r; - return visit(state->parse_error_exc_type,arg); + return visit_array((PyObject**)state->classes,CLASS_COUNT,visit,arg); } static void module_free(void *module) { module_state *state = PyModule_GetState(module); - if(state->parse_error_exc_type == NULL) return; + if(state->classes[0] == NULL) return; //% for type in types|select('enumeration_t') decref_array(state->enum_values__{$ type $},ENUM_VALUE_COUNT__{$ type $}); @@ -2055,8 +2121,6 @@ static void module_free(void *module) { decref_array((PyObject**)state->classes,CLASS_COUNT); decref_array(state->tag_names,TAGGED_UNION_NAME_COUNT); decref_array(state->char_objects,ENUM_CHAR_COUNT); - Py_DECREF(state->parse_warn_exc_type); - 
Py_DECREF(state->parse_error_exc_type); } /* Python 3.7 doesn't offer a way to get per-module state if multi-phase diff --git a/xml_parser_generator/schema.json b/xml_parser_generator/schema.json index 702854c0..45ede889 100644 --- a/xml_parser_generator/schema.json +++ b/xml_parser_generator/schema.json @@ -127,7 +127,7 @@ }, "content": { "internal": "docInternalType", - "para": "#string", + "para": "docParaType", "sect1": "docSect1Type" } }, diff --git a/xml_parser_generator/stubs_template.pyi b/xml_parser_generator/stubs_template.pyi index 023cf70c..050fcac2 100644 --- a/xml_parser_generator/stubs_template.pyi +++ b/xml_parser_generator/stubs_template.pyi @@ -2,8 +2,8 @@ import enum from typing import Generic,Literal,overload,Protocol,SupportsIndex,TypeVar from collections.abc import Iterable -T = TypeVar('T') -U = TypeVar('U') +T = TypeVar('T',covariant=True) +U = TypeVar('U',covariant=True) class SupportsRead(Protocol): def read(self, length: int, /) -> bytes | bytearray: ... @@ -36,7 +36,10 @@ class TaggedValue(Generic[T, U]): def __getitem__(self, i: SupportsIndex) -> T | U: ... class ParseError(RuntimeError): - pass + @property + def message(self) -> str: ... + @property + def lineno(self) -> int: ... class ParseWarning(UserWarning): pass @@ -121,3 +124,9 @@ class {$ type $}(enum.Enum): {$ type $} = Literal[{% for c in type.values %}{$ "'"~c~"'" $}{$ ',' if not loop.last $}{% endfor %}] //% endif //% endfor + +Node = ( +//% for type in types|map(attribute='py_name')|sort|unique + {$ '| ' if not loop.first $}{$ type $} +//% endfor +) From 4122200e2ba16ffb7ee0d15fdfc342859719542e Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sat, 25 Nov 2023 13:07:25 -0500 Subject: [PATCH 20/65] Getting close to working again --- breathe/directives/__init__.py | 15 +- breathe/directives/class_like.py | 8 +- breathe/directives/file.py | 22 +-- breathe/directives/function.py | 35 +++-- breathe/directives/index.py | 20 +-- breathe/finder/__init__.py | 13 +- breathe/finder/compound.py | 25 +-- breathe/finder/factory.py | 13 +- breathe/finder/index.py | 20 ++- breathe/parser.py | 21 ++- breathe/renderer/__init__.py | 22 ++- breathe/renderer/filter.py | 135 +++++++++-------- breathe/renderer/mask.py | 29 ++-- breathe/renderer/sphinxrenderer.py | 141 ++++++++++------- tests/data/arange.xml | 12 +- tests/data/ellipsis.xml | 2 +- tests/test_renderer.py | 21 +-- tests/test_utils.py | 4 +- xml_parser_generator/make_parser.py | 4 + xml_parser_generator/module_template.c | 192 +++++++++++++++++++++--- xml_parser_generator/schema.json | 35 +++-- xml_parser_generator/stubs_template.pyi | 15 +- 22 files changed, 512 insertions(+), 292 deletions(-) diff --git a/breathe/directives/__init__.py b/breathe/directives/__init__.py index 9b2bcdab..9e5c62e3 100644 --- a/breathe/directives/__init__.py +++ b/breathe/directives/__init__.py @@ -1,7 +1,7 @@ from __future__ import annotations from breathe.finder.factory import FinderFactory -from breathe.parser import FileIOError, ParserError +from breathe import parser from breathe.renderer import format_parser_error, RenderContext from breathe.renderer.filter import FilterFactory from breathe.renderer.sphinxrenderer import SphinxRenderer @@ -15,6 +15,7 @@ if TYPE_CHECKING: from breathe.parser import DoxygenParserFactory from breathe.project import ProjectInfoFactory, ProjectInfo + from breathe.renderer import TaggedNode from breathe.renderer.filter import Filter from breathe.renderer.mask import MaskFactoryBase from breathe.renderer.target import TargetHandler @@ -93,7 
+94,7 @@ def create_warning(self, project_info: ProjectInfo | None, **kwargs) -> _Warning def render( self, - node_stack, + node_stack: list[TaggedNode], project_info: ProjectInfo, filter_: Filter, target_handler: TargetHandler, @@ -106,21 +107,23 @@ def render( object_renderer = SphinxRenderer( self.parser_factory.app, project_info, - node_stack, + [tn.value for tn in node_stack], self.state, self.state.document, target_handler, self.parser_factory.create_compound_parser(project_info), filter_, ) - except ParserError as e: + except parser.ParserError as e: return format_parser_error( "doxygenclass", e.message, e.filename, self.state, self.lineno, True ) - except FileIOError as e: + except parser.FileIOError as e: return format_parser_error( "doxygenclass", e.error, e.filename, self.state, self.lineno, True ) context = RenderContext(node_stack, mask_factory, directive_args) - return object_renderer.render(node_stack[0], context) + node = node_stack[0].value + assert isinstance(node, parser.Node) + return object_renderer.render(node, context) diff --git a/breathe/directives/class_like.py b/breathe/directives/class_like.py index e5e70112..cc7fc780 100644 --- a/breathe/directives/class_like.py +++ b/breathe/directives/class_like.py @@ -1,16 +1,18 @@ +from __future__ import annotations + from breathe.directives import BaseDirective from breathe.file_state_cache import MTimeError from breathe.project import ProjectError from breathe.renderer.mask import NullMaskFactory from breathe.renderer.target import create_target_handler -from docutils.nodes import Node from docutils.parsers.rst.directives import unchanged_required, unchanged, flag from typing import TYPE_CHECKING if TYPE_CHECKING: - from breathe import parser + from breathe import renderer + from docutils.nodes import Node class _DoxygenClassLikeDirective(BaseDirective): @@ -50,7 +52,7 @@ def run(self) -> list[Node]: finder_filter = self.filter_factory.create_compound_finder_filter(name, self.kind) - matches: list[parser.Node] = [] + matches: list[list[renderer.TaggedNode]] = [] finder.filter_(finder_filter, matches) if len(matches) == 0: diff --git a/breathe/directives/file.py b/breathe/directives/file.py index 78686516..3a858661 100644 --- a/breathe/directives/file.py +++ b/breathe/directives/file.py @@ -1,8 +1,10 @@ +from __future__ import annotations + from ..renderer.mask import NullMaskFactory from ..directives import BaseDirective -from ..project import ProjectError +from breathe import project -from breathe.renderer import RenderContext +from breathe import renderer, parser from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.target import create_target_handler @@ -18,11 +20,11 @@ class _BaseFileDirective(BaseDirective): # information is present in the Directive class from the docutils framework that we'd have to # pass way too much stuff to a helper object to be reasonable. 
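    # Minimal usage sketch (the project name is assumed, not taken from this patch):
    #
    #     .. doxygenfile:: util.h
    #        :project: myproject
    #
    # handle_contents() below looks up the compound(s) Doxygen recorded for that
    # file and renders the result into the document.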
- def handle_contents(self, file_, project_info): + def handle_contents(self, file_: str, project_info): finder = self.finder_factory.create_finder(project_info) finder_filter = self.filter_factory.create_file_finder_filter(file_) - matches = [] + matches: list[list[renderer.TaggedNode]] = [] finder.filter_(finder_filter, matches) if len(matches) > 1: @@ -40,7 +42,7 @@ def handle_contents(self, file_, project_info): object_renderer = SphinxRenderer( self.parser_factory.app, project_info, - node_stack, + [tv.value for tv in node_stack], self.state, self.state.document, target_handler, @@ -49,8 +51,10 @@ def handle_contents(self, file_, project_info): ) mask_factory = NullMaskFactory() - context = RenderContext(node_stack, mask_factory, self.directive_args) - node_list.extend(object_renderer.render(node_stack[0], context)) + context = renderer.RenderContext(node_stack, mask_factory, self.directive_args) + value = node_stack[0].value + assert isinstance(value,parser.Node) + node_list.extend(object_renderer.render(value, context)) return node_list @@ -76,7 +80,7 @@ def run(self): file_ = self.arguments[0] try: project_info = self.project_info_factory.create_project_info(self.options) - except ProjectError as e: + except project.ProjectError as e: warning = self.create_warning(None) return warning.warn("doxygenfile: %s" % e) @@ -104,7 +108,7 @@ def run(self): file_ = self.arguments[0] try: project_info = self.project_info_factory.retrieve_project_info_for_auto(self.options) - except ProjectError as e: + except project.ProjectError as e: warning = self.create_warning(None) return warning.warn("autodoxygenfile: %s" % e) diff --git a/breathe/directives/function.py b/breathe/directives/function.py index 40326318..b4f7b76c 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -1,15 +1,15 @@ +from __future__ import annotations + from breathe.directives import BaseDirective from breathe.exception import BreatheError from breathe.file_state_cache import MTimeError from breathe import parser from breathe.project import ProjectError -from breathe.renderer import format_parser_error, RenderContext +from breathe.renderer import format_parser_error, RenderContext, mask, TaggedNode from breathe.renderer.sphinxrenderer import WithContext -from breathe.renderer.mask import MaskFactory, NullMaskFactory, NoParameterNamesMask from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.target import create_target_handler -from docutils.nodes import Node from docutils.parsers.rst.directives import unchanged_required, flag from sphinx.domains import cpp @@ -18,7 +18,11 @@ import re -from typing import Any, List, Optional +from typing import Any, List, Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from breathe import project + from docutils.nodes import Node class _NoMatchingFunctionError(BreatheError): @@ -96,14 +100,14 @@ def run(self) -> List[Node]: namespace, function_name ) - # TODO: find a more specific type for the Doxygen nodes - matchesAll: List[Any] = [] + matchesAll: list[list[TaggedNode]] = [] finder.filter_(finder_filter, matchesAll) - matches = [] + matches: list[list[TaggedNode]] = [] for m in matchesAll: # only take functions and friend functions # ignore friend classes - node = m[0] + node = m[0].value + assert isinstance(node, parser.Node_MemberType) if node.kind == "friend" and not node.argsstring: continue matches.append(m) @@ -154,7 +158,7 @@ def run(self) -> List[Node]: project_info, filter_, target_handler, - NullMaskFactory(), + 
mask.NullMaskFactory(), self.directive_args, ) @@ -202,7 +206,7 @@ def stripDeclarator(declarator): return paramQual def _create_function_signature( - self, node_stack, project_info, filter_, target_handler, mask_factory, directive_args + self, node_stack: list[TaggedNode], project_info, filter_, target_handler, mask_factory, directive_args ) -> str: "Standard render process used by subclasses" @@ -210,7 +214,7 @@ def _create_function_signature( object_renderer = SphinxRenderer( self.parser_factory.app, project_info, - node_stack, + [tn.value for tn in node_stack], self.state, self.state.document, target_handler, @@ -227,8 +231,9 @@ def _create_function_signature( ) context = RenderContext(node_stack, mask_factory, directive_args) - node = node_stack[0] + node = node_stack[0].value with WithContext(object_renderer, context): + assert isinstance(node,parser.Node_memberdefType) # this part should be kept in sync with visit_function in sphinxrenderer name = node.name # assume we are only doing this for C++ declarations @@ -237,7 +242,7 @@ def _create_function_signature( object_renderer.create_template_prefix(node), "".join(n.astext() for n in object_renderer.render(node.type)), name, - node.argsstring, + node.argsstring or '', ] ) cpp_parser = cpp.DefinitionParser( @@ -246,7 +251,7 @@ def _create_function_signature( ast = cpp_parser.parse_declaration("function", "function") return str(ast) - def _resolve_function(self, matches, args: Optional[cpp.ASTParametersQualifiers], project_info): + def _resolve_function(self, matches: list[list[TaggedNode]], args: cpp.ASTParametersQualifiers | None, project_info: project.ProjectInfo): if not matches: raise _NoMatchingFunctionError() @@ -260,7 +265,7 @@ def _resolve_function(self, matches, args: Optional[cpp.ASTParametersQualifiers] {"no-link": ""}, project_info, self.state.document ) filter_ = self.filter_factory.create_outline_filter(text_options) - mask_factory = MaskFactory({parser.Node_paramType: NoParameterNamesMask}) + mask_factory = mask.MaskFactory({parser.Node_paramType: mask.no_parameter_names}) # Override the directive args for this render directive_args = self.directive_args[:] diff --git a/breathe/directives/index.py b/breathe/directives/index.py index 3a2e846d..029f7e4d 100644 --- a/breathe/directives/index.py +++ b/breathe/directives/index.py @@ -1,7 +1,7 @@ from breathe.directives import BaseDirective -from breathe.parser import ParserError, FileIOError +from breathe import parser from breathe.project import ProjectError -from breathe.renderer import format_parser_error, RenderContext +from breathe.renderer import format_parser_error, RenderContext, TaggedNode from breathe.renderer.mask import NullMaskFactory from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.target import create_target_handler @@ -11,7 +11,7 @@ class RootDataObject: - node_type = "root" + pass class _BaseIndexDirective(BaseDirective): @@ -24,11 +24,11 @@ class _BaseIndexDirective(BaseDirective): def handle_contents(self, project_info) -> list[Node]: try: finder = self.finder_factory.create_finder(project_info) - except ParserError as e: + except parser.ParserError as e: return format_parser_error( self.name, e.message, e.filename, self.state, self.lineno, True ) - except FileIOError as e: + except parser.FileIOError as e: return format_parser_error(self.name, e.error, e.filename, self.state, self.lineno) data_object = finder.root() @@ -48,15 +48,17 @@ def handle_contents(self, project_info) -> list[Node]: ) mask_factory = NullMaskFactory() 
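        # Sketch of the render context built next (types as used in this patch):
        # the node stack pairs each node with an optional tag, e.g.
        #     [TaggedNode(None, data_object), TaggedNode(None, RootDataObject())]
        # so the renderer can unwrap .value uniformly for tagged and untagged entries.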
- context = RenderContext([data_object, RootDataObject()], mask_factory, self.directive_args) + context = RenderContext([TaggedNode(None,data_object), TaggedNode(None,RootDataObject())], mask_factory, self.directive_args) + value = context.node_stack[0].value + assert isinstance(value,parser.Node) try: - node_list = object_renderer.render(context.node_stack[0], context) - except ParserError as e: + node_list = object_renderer.render(value, context) + except parser.ParserError as e: return format_parser_error( self.name, e.message, e.filename, self.state, self.lineno, True ) - except FileIOError as e: + except parser.FileIOError as e: return format_parser_error(self.name, e.error, e.filename, self.state, self.lineno) return node_list diff --git a/breathe/finder/__init__.py b/breathe/finder/__init__.py index c1da13fe..37261553 100644 --- a/breathe/finder/__init__.py +++ b/breathe/finder/__init__.py @@ -6,16 +6,9 @@ from breathe.project import ProjectInfo from breathe.finder.factory import DoxygenItemFinderFactory from breathe.renderer.filter import Filter + from breathe.renderer import TaggedNode -T = TypeVar('T') - -def stack(element, list_): - """Stack an element on to the start of a list and return as a new list""" - - # Copy list first so we have a new list to insert into - output = list_[:] - output.insert(0, element) - return output +T = TypeVar('T', covariant=True) class ItemFinder(Generic[T]): @@ -24,5 +17,5 @@ def __init__(self, project_info: ProjectInfo, data_object: T, item_finder_factor self.item_finder_factory: DoxygenItemFinderFactory = item_finder_factory self.project_info = project_info - def filter_(self, ancestors, filter_: Filter, matches) -> None: + def filter_(self, ancestors: list[TaggedNode], filter_: Filter, matches: list[list[TaggedNode]]) -> None: raise NotImplementedError diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index a83c44c3..cf3addce 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -1,23 +1,24 @@ -from breathe.finder import ItemFinder, stack +from breathe.finder import ItemFinder from breathe.renderer.filter import Filter from breathe import parser +from breathe.renderer import TaggedNode class DoxygenTypeSubItemFinder(ItemFinder[parser.Node_DoxygenType]): - def filter_(self, ancestors, filter_: Filter, matches) -> None: + def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) -> None: """Find nodes which match the filter. 
Doesn't test this node, only its children""" - node_stack = stack(self.data_object, ancestors) + node_stack = [TaggedNode(None,self.data_object)] + ancestors assert len(self.data_object.compounddef) == 1 compound_finder = self.item_finder_factory.create_finder(self.data_object.compounddef[0]) compound_finder.filter_(node_stack, filter_, matches) class CompoundDefTypeSubItemFinder(ItemFinder[parser.Node_compounddefType]): - def filter_(self, ancestors, filter_: Filter, matches) -> None: + def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) -> None: """Finds nodes which match the filter and continues checks to children""" - node_stack = stack(self.data_object, ancestors) + node_stack = [TaggedNode(None,self.data_object)] + ancestors if filter_.allow(node_stack): matches.append(node_stack) @@ -31,10 +32,10 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: class SectionDefTypeSubItemFinder(ItemFinder[parser.Node_sectiondefType]): - def filter_(self, ancestors, filter_: Filter, matches) -> None: + def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" - node_stack = stack(self.data_object, ancestors) + node_stack = [TaggedNode(None,self.data_object)] + ancestors if filter_.allow(node_stack): matches.append(node_stack) @@ -44,22 +45,22 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: class MemberDefTypeSubItemFinder(ItemFinder[parser.Node_memberdefType]): - def filter_(self, ancestors, filter_: Filter, matches) -> None: + def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) -> None: data_object = self.data_object - node_stack = stack(data_object, ancestors) + node_stack = [TaggedNode(None,self.data_object)] + ancestors if filter_.allow(node_stack): matches.append(node_stack) if data_object.kind == parser.DoxMemberKind.enum: for value in data_object.enumvalue: - value_stack = stack(value, node_stack) + value_stack = [TaggedNode('enumvalue',value)] + node_stack if filter_.allow(value_stack): matches.append(value_stack) class RefTypeSubItemFinder(ItemFinder[parser.Node_refType]): - def filter_(self, ancestors, filter_: Filter, matches) -> None: - node_stack = stack(self.data_object, ancestors) + def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) -> None: + node_stack = [TaggedNode(None,self.data_object)] + ancestors if filter_.allow(node_stack): matches.append(node_stack) diff --git a/breathe/finder/factory.py b/breathe/finder/factory.py index 15576d84..d4ff7be1 100644 --- a/breathe/finder/factory.py +++ b/breathe/finder/factory.py @@ -5,6 +5,7 @@ from breathe.finder import compound as compoundfinder from breathe import parser from breathe.project import ProjectInfo +from breathe.renderer import FakeParentNode, TaggedNode from breathe.renderer.filter import Filter from sphinx.application import Sphinx @@ -26,7 +27,7 @@ def __call__(self, project_info: ProjectInfo, *args) -> indexfinder.CompoundType class DoxygenItemFinderFactory: - def __init__(self, finders: dict[type[parser.Node], ItemFinderCreator], project_info: ProjectInfo): + def __init__(self, finders: dict[type[parser.NodeOrValue], ItemFinderCreator], project_info: ProjectInfo): self.finders = finders self.project_info = project_info @@ -34,20 +35,16 @@ def create_finder(self, data_object) -> ItemFinder: return self.finders[type(data_object)](self.project_info, data_object, self) -class _FakeParentNode: - 
node_type = "fakeparent" - - class Finder: def __init__(self, root, item_finder_factory: DoxygenItemFinderFactory) -> None: self._root = root self.item_finder_factory = item_finder_factory - def filter_(self, filter_: Filter, matches: list[parser.Node]) -> None: + def filter_(self, filter_: Filter, matches: list[list[TaggedNode]]) -> None: """Adds all nodes which match the filter into the matches list""" item_finder = self.item_finder_factory.create_finder(self._root) - item_finder.filter_([_FakeParentNode()], filter_, matches) + item_finder.filter_([TaggedNode(None,FakeParentNode())], filter_, matches) def root(self): return self._root @@ -64,7 +61,7 @@ def create_finder(self, project_info: ProjectInfo) -> Finder: return self.create_finder_from_root(root, project_info) def create_finder_from_root(self, root, project_info: ProjectInfo) -> Finder: - finders: dict[type[parser.Node], ItemFinderCreator] = { + finders: dict[type[parser.NodeOrValue], ItemFinderCreator] = { parser.Node_DoxygenTypeIndex: indexfinder.DoxygenTypeSubItemFinder, parser.Node_CompoundType: _CreateCompoundTypeSubFinder(self.app, self.parser_factory), parser.Node_MemberType: indexfinder.MemberTypeSubItemFinder, diff --git a/breathe/finder/index.py b/breathe/finder/index.py index d67e369d..7a1ab4ed 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -1,10 +1,16 @@ -from breathe.finder import ItemFinder, stack -from breathe.renderer.filter import Filter, FilterFactory +from __future__ import annotations + +from breathe.finder import ItemFinder +from breathe.renderer.filter import FilterFactory from breathe import parser +from breathe.renderer import TaggedNode from sphinx.application import Sphinx -from typing import Any +from typing import Any, TYPE_CHECKING + +if TYPE_CHECKING: + from breathe.renderer.filter import Filter class DoxygenTypeSubItemFinder(ItemFinder[parser.Node_DoxygenTypeIndex]): @@ -12,7 +18,7 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" compounds = self.data_object.compound - node_stack = stack(self.data_object, ancestors) + node_stack = [TaggedNode(None,self.data_object)] + ancestors for compound in compounds: compound_finder = self.item_finder_factory.create_finder(compound) compound_finder.filter_(node_stack, filter_, matches) @@ -25,7 +31,7 @@ def __init__(self, app: Sphinx, compound_parser: parser.DoxygenCompoundParser, * self.filter_factory = FilterFactory(app) self.compound_parser = compound_parser - def filter_(self, ancestors, filter_: Filter, matches) -> None: + def filter_(self, ancestors: list[TaggedNode], filter_: Filter, matches) -> None: """Finds nodes which match the filter and continues checks to children Requires parsing the xml files referenced by the children for which we use the compound @@ -33,7 +39,7 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: top level node of the compound file. 
""" - node_stack = stack(self.data_object, ancestors) + node_stack = [TaggedNode(None,self.data_object)] + ancestors # Match against compound object if filter_.allow(node_stack): @@ -67,7 +73,7 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: class MemberTypeSubItemFinder(ItemFinder[parser.Node_memberdefType]): def filter_(self, ancestors, filter_: Filter, matches) -> None: - node_stack = stack(self.data_object, ancestors) + node_stack = [TaggedNode(None,self.data_object)] + ancestors # Match against member object if filter_.allow(node_stack): diff --git a/breathe/parser.py b/breathe/parser.py index baac33b4..95db3e9d 100644 --- a/breathe/parser.py +++ b/breathe/parser.py @@ -1,5 +1,6 @@ from __future__ import annotations +import reprlib from breathe import file_state_cache, path_handler from breathe.project import ProjectInfo @@ -7,13 +8,25 @@ from sphinx.application import Sphinx -from typing import overload +from typing import overload, TYPE_CHECKING +if TYPE_CHECKING: + NodeOrValue = Node | str | None + +@reprlib.recursive_repr() +def node_repr(self: Node) -> str: + cls = type(self) + fields = ', '.join(f'{field}={getattr(self,field)!r}' for field in cls._fields) + if isinstance(self,FrozenList): + pos = ', '.join(map(repr,self)) + fields = f'[{pos}], {fields}' + return f'{cls.__name__}({fields})' +Node.__repr__ = node_repr # type: ignore class ParserError(RuntimeError): def __init__(self, message: str, filename: str, lineno: int | None = None): - super().__init__(message,filename,lineno) + super().__init__(message,lineno,filename) @property def message(self) -> str: @@ -29,8 +42,8 @@ def filename(self) -> str: def __str__(self): if self.lineno is None: - return f"file {self.filename}: {self.message}" - return f"file {self.filename}:{self.lineno}: {self.message}" + return f"{self.filename}: {self.message}" + return f"{self.filename}:{self.lineno}: {self.message}" class FileIOError(RuntimeError): diff --git a/breathe/renderer/__init__.py b/breathe/renderer/__init__.py index fe53a5da..1b472615 100644 --- a/breathe/renderer/__init__.py +++ b/breathe/renderer/__init__.py @@ -1,5 +1,15 @@ +from __future__ import annotations + from docutils import nodes import textwrap +from typing import NamedTuple, TYPE_CHECKING, Union + +if TYPE_CHECKING: + from breathe import parser + from breathe.renderer import mask + from breathe.directives.index import RootDataObject + + DataObject = Union[parser.NodeOrValue, RootDataObject, 'FakeParentNode'] def format_parser_error(name: str, error: str, filename: str, state, lineno: int, do_unicode_warning: bool = False) -> list[nodes.Node]: @@ -33,10 +43,16 @@ def format_parser_error(name: str, error: str, filename: str, state, lineno: int ), ] +class FakeParentNode: + pass + +class TaggedNode(NamedTuple): + tag: str | None + value: DataObject class RenderContext: def __init__( - self, node_stack, mask_factory, directive_args, domain: str = "", child: bool = False + self, node_stack: list[TaggedNode], mask_factory: mask.MaskFactoryBase, directive_args, domain: str = "", child: bool = False ) -> None: self.node_stack = node_stack self.mask_factory = mask_factory @@ -44,7 +60,7 @@ def __init__( self.domain = domain self.child = child - def create_child_context(self, data_object) -> "RenderContext": + def create_child_context(self, data_object: parser.NodeOrValue, tag: str | None = None) -> RenderContext: node_stack = self.node_stack[:] - node_stack.insert(0, self.mask_factory.mask(data_object)) + node_stack.insert(0, 
TaggedNode(tag,self.mask_factory.mask(data_object))) return RenderContext(node_stack, self.mask_factory, self.directive_args, self.domain, True) diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 4d40fc8d..97413b82 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -207,9 +207,12 @@ from sphinx.application import Sphinx import os -from typing import Any, Callable, Dict, List +from typing import Any, Callable, TYPE_CHECKING from collections.abc import Iterable +if TYPE_CHECKING: + from breathe import renderer + class UnrecognisedKindError(Exception): pass @@ -221,7 +224,7 @@ class UnrecognisedKindError(Exception): class Selector: - def __call__(self, node_stack): + def __call__(self, node_stack: list[renderer.TaggedNode]) -> renderer.TaggedNode: raise NotImplementedError @property @@ -232,10 +235,6 @@ def node_type(self): def kind(self): return KindAccessor(self) - @property - def node_name(self): - return AttributeAccessor(self, "node_name") - @property def name(self): return AttributeAccessor(self, "name") @@ -254,7 +253,7 @@ def prot(self): @property def valueOf(self): - return AttributeAccessor(self, "valueOf_") + return ValueOfAccessor(self) @property def id(self): @@ -265,12 +264,12 @@ class Ancestor(Selector): def __init__(self, generations): self.generations = generations - def __call__(self, node_stack): + def __call__(self, node_stack: list[renderer.TaggedNode]) -> renderer.TaggedNode: return node_stack[self.generations] class Parent(Selector): - def __call__(self, node_stack): + def __call__(self, node_stack: list[renderer.TaggedNode]) -> renderer.TaggedNode: return node_stack[1] def __repr__(self) -> str: @@ -278,7 +277,7 @@ def __repr__(self) -> str: class Node(Selector): - def __call__(self, node_stack): + def __call__(self, node_stack: list[renderer.TaggedNode]) -> renderer.TaggedNode: return node_stack[0] def __repr__(self) -> str: @@ -309,28 +308,18 @@ def is_one_of(self, collection: Iterable[Any]) -> InFilter: def has_content(self) -> HasContentFilter: return HasContentFilter(self) - def endswith(self, options: List[str]) -> EndsWithFilter: + def endswith(self, options: list[str]) -> EndsWithFilter: return EndsWithFilter(self, options) class NameAccessor(Accessor): def __call__(self, node_stack) -> str: - return self.selector(node_stack).name - - -class NodeNameAccessor(Accessor): - """Check the .node_name member which is declared on refTypeSub nodes - - It distinguishes between innerclass, innernamespace, etc. 
- """ - - def __call__(self, node_stack) -> str: - return self.selector(node_stack).node_name + return self.selector(node_stack).value.name # type: ignore class NodeTypeAccessor(Accessor): def __call__(self, node_stack) -> type: - return type(self.selector(node_stack)) + return type(self.selector(node_stack).value) def __repr__(self) -> str: return f'NodeTypeAccessor({self.selector!r})' @@ -338,7 +327,7 @@ def __repr__(self) -> str: class KindAccessor(Accessor): def __call__(self, node_stack): - return self.selector(node_stack).kind.value + return self.selector(node_stack).value.kind.value # type: ignore class AttributeAccessor(Accessor): @@ -352,24 +341,32 @@ def __init__(self, selector: Selector, attribute_name: str) -> None: self.attribute_name = attribute_name def __call__(self, node_stack) -> Any: - return getattr(self.selector(node_stack), self.attribute_name) + return getattr(self.selector(node_stack).value, self.attribute_name) def __repr__(self) -> str: return f'AttributeAccessor({self.selector!r}, {self.attribute_name!r})' class LambdaAccessor(Accessor): - def __init__(self, selector: Selector, func: Callable[[Any], str]): + def __init__(self, selector: Selector, func: Callable[[Any], Any]): super().__init__(selector) self.func = func def __call__(self, node_stack): - return self.func(self.selector(node_stack)) + return self.func(self.selector(node_stack).value) class NamespaceAccessor(Accessor): def __call__(self, node_stack): - return self.selector(node_stack).namespaces + r = [] + r.extend(self.selector(node_stack).value.innernamespace) # type: ignore + r.extend(self.selector(node_stack).value.innerclass) # type: ignore + return r + + +class ValueOfAccessor(Accessor): + def __call__(self, node_stack) -> str: + return ''.join(self.selector(node_stack).value) # type: ignore ############################################################################### @@ -431,7 +428,7 @@ class InFilter(Filter): def __init__(self, accessor: Accessor, members: Iterable[Any]) -> None: self.accessor = accessor - self.members = frozenset(members) + self.members = members def allow(self, node_stack) -> bool: name = self.accessor(node_stack) @@ -565,7 +562,7 @@ def allow(self, node_stack) -> bool: class Gather(Filter): - def __init__(self, accessor: Accessor, names: List[str]): + def __init__(self, accessor: Accessor, names: list[str]): self.accessor = accessor self.names = names @@ -574,6 +571,24 @@ def allow(self, node_stack) -> bool: return False +class TagFilter(Filter): + def __init__(self, selector: Selector, tags: Iterable[str]): + self.selector = selector + self.tags = frozenset(tags) + + def allow(self, node_stack) -> bool: + return self.selector(node_stack).tag in self.tags + + +class LambdaFilter(Filter): + def __init__(self, selector: Selector, func: Callable[[Any], Any]): + self.selector = selector + self.func = func + + def allow(self, node_stack): + return self.func(self.selector(node_stack).value) + + ############################################################################### # Other stuff ############################################################################### @@ -659,7 +674,7 @@ def create_class_filter(self, target: str, options: dict[str, Any]) -> Filter: self.create_show_filter(filter_options), ) - def create_innerclass_filter(self, options: Dict[str, Any], outerclass: str = "") -> Filter: + def create_innerclass_filter(self, options: dict[str, Any], outerclass: str = "") -> Filter: """ :param outerclass: Should be the class/struct being target by the directive calling 
this code. If it is a group or namespace directive then it should be left @@ -670,7 +685,7 @@ def create_innerclass_filter(self, options: Dict[str, Any], outerclass: str = "" """ node = Node() - node_is_innerclass = (node.node_type == "ref") & (node.node_name == "innerclass") + node_is_innerclass = (node.node_type == parser.Node_refType) & TagFilter(node, ["innerclass"]) parent = Parent() parent_is_compounddef = parent.node_type == "compounddef" @@ -715,7 +730,7 @@ def create_innerclass_filter(self, options: Dict[str, Any], outerclass: str = "" # to check the parent's type as well return innerclass | public_innerclass_filter | description - def create_show_filter(self, options: Dict[str, Any]) -> Filter: + def create_show_filter(self, options: dict[str, Any]) -> Filter: """Currently only handles the header-file entry""" try: @@ -738,7 +753,7 @@ def create_show_filter(self, options: Dict[str, Any]) -> Filter: ) def _create_description_filter( - self, allow: bool, level: str, options: Dict[str, Any] + self, allow: bool, level: str, options: dict[str, Any] ) -> Filter: """Whether or not we allow descriptions is determined by the calling function and we just do whatever the 'allow' function parameter tells us. @@ -758,7 +773,7 @@ def _create_description_filter( return description_filter - def _create_public_members_filter(self, options: Dict[str, Any]) -> Filter: + def _create_public_members_filter(self, options: dict[str, Any]) -> Filter: node = Node() node_is_memberdef = node.node_type == "memberdef" node_is_public = node.prot == "public" @@ -793,7 +808,7 @@ def _create_public_members_filter(self, options: Dict[str, Any]) -> Filter: return public_members_filter def _create_non_public_members_filter( - self, prot: str, option_name: str, options: Dict[str, Any] + self, prot: str, option_name: str, options: dict[str, Any] ) -> Filter: """'prot' is the doxygen xml term for 'public', 'protected' and 'private' categories.""" @@ -813,7 +828,7 @@ def _create_non_public_members_filter( filter_ = ~is_memberdef | node_is_public return filter_ - def _create_undoc_members_filter(self, options: Dict[str, Any]) -> Filter: + def _create_undoc_members_filter(self, options: dict[str, Any]) -> Filter: node = Node() node_is_memberdef = node.node_type == "memberdef" @@ -828,7 +843,7 @@ def _create_undoc_members_filter(self, options: Dict[str, Any]) -> Filter: undoc_members_filter = OpenFilter() return undoc_members_filter - def create_class_member_filter(self, options: Dict[str, Any]) -> Filter: + def create_class_member_filter(self, options: dict[str, Any]) -> Filter: """Content filter based on :members: and :private-members: classes""" # I can't fully explain the filtering of descriptions here. 
More testing needed to figure @@ -858,30 +873,31 @@ def create_class_member_filter(self, options: Dict[str, Any]) -> Filter: allowed_members = (public_members | protected_members | private_members) & undoc_members return allowed_members | description - def create_outline_filter(self, options: Dict[str, Any]) -> Filter: + def create_outline_filter(self, options: dict[str, Any]) -> Filter: if "outline" in options: node = Node() return ~node.node_type.is_one_of([parser.Node_descriptionType, parser.Node_incType]) else: return OpenFilter() - def create_file_filter(self, filename: str, options: Dict[str, Any]) -> Filter: - valid_names: List[str] = [] + def create_file_filter(self, filename: str, options: dict[str, Any]) -> Filter: + valid_names: list[str] = [] + + def gather_namespaces(node: parser.Node_compounddefType): + valid_names.extend(''.join(ns) for ns in node.innernamespace) + valid_names.extend(''.join(ns) for ns in node.innerclass) + return False filter_ = AndFilter( NotFilter( # Gather the "namespaces" attribute from the # compounddef for the file we're rendering and # store the information in the "valid_names" list - # - # Gather always returns false, so, combined with - # the NotFilter this chunk always returns true and - # so does not affect the result of the filtering AndFilter( InFilter(NodeTypeAccessor(Node()), [parser.Node_compounddefType]), InFilter(KindAccessor(Node()), ["file"]), FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename), - Gather(LambdaAccessor(Node(), lambda x: x.namespaces), valid_names), + LambdaFilter(Node(), gather_namespaces), ) ), NotFilter( @@ -895,12 +911,8 @@ def create_file_filter(self, filename: str, options: Dict[str, Any]) -> Filter: AndFilter( InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), InFilter(NodeTypeAccessor(Node()), [parser.Node_refType]), - InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]), - NotFilter( - InFilter( - LambdaAccessor(Node(), lambda x: x.content_[0].getValue()), valid_names - ) - ), + TagFilter(Node(), ["innerclass", "innernamespace"]), + LambdaFilter(Node(), (lambda node: ''.join(node) not in valid_names)) ) ), NotFilter( @@ -910,10 +922,10 @@ def create_file_filter(self, filename: str, options: Dict[str, Any]) -> Filter: AndFilter( InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), InFilter(NodeTypeAccessor(Node()), [parser.Node_refType]), - InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]), + TagFilter(Node(), ["innerclass", "innernamespace"]), NamespaceFilter( NamespaceAccessor(Parent()), - LambdaAccessor(Node(), lambda x: x.content_[0].getValue()), + LambdaAccessor(Node(), ''.join), ), ) ), @@ -949,7 +961,7 @@ def create_file_filter(self, filename: str, options: Dict[str, Any]) -> Filter: ) return AndFilter(self.create_outline_filter(options), filter_) - def create_content_filter(self, kind: str, options: Dict[str, Any]) -> Filter: + def create_content_filter(self, kind: str, options: dict[str, Any]) -> Filter: """Returns a filter which matches the contents of the or namespace but not the group or namepace name or description. 
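
(A minimal sketch of how the tag-based selectors above are meant to compose; every name is taken from the hunks in this patch, but the node-stack layout described in the comments is an assumption rather than part of the change itself.)

    # node_stack is a list[renderer.TaggedNode]: node_stack[0] is the current
    # node (selected by Node()) and node_stack[1] its parent (Parent()).
    # TaggedNode.tag carries the element name ("innerclass", "innernamespace",
    # "enumvalue", ...) that NodeNameAccessor previously read from the removed
    # node_name attribute.
    node = Node()
    parent = Parent()
    public_innerclass = (
        (parent.node_type == parser.Node_compounddefType)
        & (node.node_type == parser.Node_refType)
        & TagFilter(node, ["innerclass"])
        & (node.prot == "public")
    )
    if public_innerclass.allow(node_stack):
        matches.append(node_stack)
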
@@ -975,7 +987,7 @@ def create_content_filter(self, kind: str, options: Dict[str, Any]) -> Filter: parent_is_compounddef = parent.node_type == parser.Node_compounddefType parent_is_class = parent.kind == kind - node_is_innerclass = (node.node_type == parser.Node_refType) & (node.node_name == "innerclass") + node_is_innerclass = (node.node_type == parser.Node_refType) & TagFilter(node, ["innerclass"]) node_is_public = node.prot == "public" public_innerclass = ( @@ -984,13 +996,13 @@ def create_content_filter(self, kind: str, options: Dict[str, Any]) -> Filter: return public_members | public_innerclass - def create_index_filter(self, options: Dict[str, Any]) -> Filter: + def create_index_filter(self, options: dict[str, Any]) -> Filter: filter_ = AndFilter( NotFilter( AndFilter( InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), InFilter(NodeTypeAccessor(Node()), [parser.Node_refType]), - InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]), + TagFilter(Node(), ["innerclass", "innernamespace"]), ) ), NotFilter( @@ -1033,12 +1045,7 @@ def create_member_finder_filter(self, namespace: str, name: str, kind: str) -> F if namespace: parent_matches = ( (parent.node_type == parser.Node_CompoundType) - & ( - (parent.kind == "namespace") - | (parent.kind == "class") - | (parent.kind == "struct") - | (parent.kind == "interface") - ) + & InFilter(parent.kind, ["namespace", "class", "struct", "interface"]) & (parent.name == namespace) ) return parent_matches & node_matches diff --git a/breathe/renderer/mask.py b/breathe/renderer/mask.py index 5e968140..913b7fc6 100644 --- a/breathe/renderer/mask.py +++ b/breathe/renderer/mask.py @@ -18,15 +18,24 @@ """ +from __future__ import annotations -class NoParameterNamesMask: - def __init__(self, data_object) -> None: - self.data_object = data_object +from typing import Callable +from breathe import parser - def __getattr__(self, attr): - if attr in ["declname", "defname", "defval"]: - return None - return getattr(self.data_object, attr) + +def no_parameter_names(node: parser.NodeOrValue): + assert isinstance(node,parser.Node_paramType) + return parser.Node_paramType( + array = node.array, + attributes = node.attributes, + briefdescription = node.briefdescription, + declname = None, + defname = None, + defval = None, + type = node.type, + typeconstraint = node.typeconstraint + ) class MaskFactoryBase: @@ -35,15 +44,15 @@ def mask(self, data_object): class MaskFactory(MaskFactoryBase): - def __init__(self, lookup): + def __init__(self, lookup : dict[type[parser.NodeOrValue],Callable[[parser.NodeOrValue],parser.NodeOrValue]]): self.lookup = lookup - def mask(self, data_object): + def mask(self, data_object: parser.NodeOrValue) -> parser.NodeOrValue: m = self.lookup.get(type(data_object)) if m is None: return data_object return m(data_object) class NullMaskFactory(MaskFactoryBase): - def mask(self, data_object): + def mask(self, data_object: parser.NodeOrValue) -> parser.NodeOrValue: return data_object diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index 047a106b..723a72ea 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -24,7 +24,7 @@ try: from sphinxcontrib import phpdomain as php # type: ignore except ImportError: - php = None # type: ignore + php = None cs: Any try: @@ -37,7 +37,7 @@ if TYPE_CHECKING: from breathe.project import ProjectInfo - from breathe.renderer import RenderContext + from breathe.renderer import RenderContext, DataObject from 
breathe.renderer.filter import Filter from breathe.renderer.target import TargetHandler @@ -528,7 +528,7 @@ class NodeHandler(Generic[T]): def __init__(self,handler): self.handler = handler - self.nodes: set[type[parser.Node]] = set() + self.nodes: set[type[parser.NodeOrValue]] = set() def __call__(self, r: SphinxRenderer, node: T, /) -> list[Node]: raise TypeError() @@ -539,13 +539,12 @@ class TaggedNodeHandler(Generic[T]): def __init__(self,handler): self.handler = handler - self.nodes: set[type[parser.Node]] = set() + self.nodes: set[type[parser.NodeOrValue]] = set() def __call__(self, r: SphinxRenderer, tag: str, node: T, /) -> list[Node]: raise TypeError() -def node_handler(node: type[parser.Node] -) -> Callable[[Callable[[SphinxRenderer, T], list[Node]]],Callable[[SphinxRenderer, T], list[Node]]]: +def node_handler(node: type[parser.NodeOrValue]): def inner(f: Callable[[SphinxRenderer, T], list[Node]]) -> Callable[[SphinxRenderer, T], list[Node]]: if not isinstance(f,NodeHandler): f = NodeHandler(f) @@ -553,8 +552,7 @@ def inner(f: Callable[[SphinxRenderer, T], list[Node]]) -> Callable[[SphinxRende return f return inner -def tagged_node_handler(node: type[parser.Node] -) -> Callable[[Callable[[SphinxRenderer, str, T], list[Node]]],Callable[[SphinxRenderer, str, T], list[Node]]]: +def tagged_node_handler(node: type[parser.NodeOrValue]): def inner(f: Callable[[SphinxRenderer, str, T], list[Node]]) -> Callable[[SphinxRenderer, str, T], list[Node]]: if not isinstance(f,TaggedNodeHandler): f = TaggedNodeHandler(f) @@ -592,14 +590,14 @@ class SphinxRenderer(metaclass=NodeVisitor): Each visit method takes a Doxygen node as an argument and returns a list of RST nodes. """ - node_handlers: ClassVar[dict[type[parser.Node], Callable[[SphinxRenderer, parser.Node], list[Node]]]] - tagged_node_handlers: ClassVar[dict[type[parser.Node], Callable[[SphinxRenderer, str, parser.Node], list[Node]]]] + node_handlers: ClassVar[dict[type[parser.NodeOrValue], Callable[[SphinxRenderer, parser.NodeOrValue], list[Node]]]] + tagged_node_handlers: ClassVar[dict[type[parser.NodeOrValue], Callable[[SphinxRenderer, str, parser.NodeOrValue], list[Node]]]] def __init__( self, app: Sphinx, project_info: ProjectInfo, - node_stack, + node_stack: list[DataObject], state, document: nodes.document, target_handler: TargetHandler, @@ -617,7 +615,7 @@ def __init__( self.compound_parser = compound_parser self.filter_ = filter_ - self.context: Optional[RenderContext] = None + self.context: RenderContext | None = None self.output_defname = True # Nesting level for lists. self.nesting_level = 0 @@ -824,9 +822,8 @@ def content(contentnode): assert n.astext()[-1] == " " txt = display_obj_type + " " declarator[0] = addnodes.desc_annotation(txt, txt) - if not self.app.env.config.breathe_debug_trace_doxygen_ids: + if target is None: target = self.create_doxygen_target(node) - assert target is not None declarator.insert(0, target) if declarator_callback: declarator_callback(declarator) @@ -891,12 +888,12 @@ def get_fully_qualified_name(self): node = node_stack[0] # If the node is a namespace, use its name because namespaces are skipped in the main loop. 
- if isinstance(node,parser.Node_CompoundType) and node.kind == parser.CompoundKind.namespace: - names.append(node.name) + if isinstance(node.value,parser.Node_CompoundType) and node.value.kind == parser.CompoundKind.namespace: + names.append(node.value.name) for node in node_stack: - if isinstance(node,parser.Node_refType) and len(names) == 0: - return ''.join(node) + if isinstance(node.value,parser.Node_refType) and len(names) == 0: + return ''.join(node.value) if ( isinstance(node,parser.Node_CompoundType) and node.kind not in [parser.CompoundKind.file, parser.CompoundKind.namespace, parser.CompoundKind.group] ) or isinstance(node,parser.Node_memberdefType): @@ -993,7 +990,7 @@ def detaileddescription(self, node) -> list[Node]: admonitions: list[Node] = [] def pullup(node, typ, dest): - for n in node.traverse(typ): + for n in node.findall(typ): del n.parent[n.parent.index(n)] dest.append(n) @@ -1003,7 +1000,7 @@ def pullup(node, typ, dest): pullup(candNode, nodes.note, admonitions) pullup(candNode, nodes.warning, admonitions) # and collapse paragraphs - for para in candNode.traverse(nodes.paragraph): + for para in candNode.findall(nodes.paragraph): if ( para.parent and len(para.parent) == 1 @@ -1012,7 +1009,7 @@ def pullup(node, typ, dest): para.replace_self(para.children) # and remove empty top-level paragraphs - if isinstance(candNode, nodes.paragraph) and len(candNode) == 0: + if isinstance(candNode, nodes.paragraph) and len(candNode) == 0: # type: ignore continue detailed.append(candNode) @@ -1233,7 +1230,7 @@ def visit_namespace(self, node: HasRefID) -> list[Node]: assert self.context is not None parent_context = self.context.create_child_context(file_data) - new_context = parent_context.create_child_context(file_data.compounddef) + new_context = parent_context.create_child_context(nodeDef) with WithContext(self, new_context): # Pretend that the signature is being rendered in context of the @@ -1444,10 +1441,16 @@ def render_signature(file_data, doxygen_target, name, kind) -> tuple[list[Node], (parser.DoxSectionKind.var, "Variables"), ] - def render_iterable(self, iterable: Iterable[parser.Node]) -> list[Node]: + def render_iterable(self, iterable: Iterable[parser.NodeOrValue], tag: str | None = None) -> list[Node]: output: list[Node] = [] for entry in iterable: - output.extend(self.render(entry)) + output.extend(self.render(entry, tag=tag)) + return output + + def render_tagged_iterable(self, iterable: Iterable[parser.TaggedValue[str,parser.NodeOrValue] | str]) -> list[Node]: + output: list[Node] = [] + for entry in iterable: + output.extend(self.render_tagged(entry)) return output @node_handler(parser.Node_compounddefType) @@ -1527,8 +1530,8 @@ def render_derivedcompoundref(node): addnode(kind.value, lambda: section_nodelists.get(kind.value, [])) # Take care of innerclasses - addnode("innerclass", lambda: self.render_iterable(node.innerclass)) - addnode("innernamespace", lambda: self.render_iterable(node.innernamespace)) + addnode("innerclass", lambda: self.render_iterable(node.innerclass, 'innerclass')) + addnode("innernamespace", lambda: self.render_iterable(node.innernamespace, 'innernamespace')) if "inner" in options: for cnode in node.innergroup: @@ -1588,17 +1591,20 @@ def visit_sectiondef(self, node: parser.Node_sectiondefType) -> list[Node]: return [] @node_handler(parser.Node_docRefTextType) - def visit_docreftext(self, node: parser.Node_docRefTextType | parser.Node_incType) -> list[Node]: - nodelist = self.render_iterable(node) + @node_handler(parser.Node_refTextType) + 
def visit_docreftext(self, node: parser.Node_docRefTextType | parser.Node_incType | parser.Node_refTextType) -> list[Node]: + if isinstance(node,parser.Node_incType): + nodelist = self.render_iterable(node) + else: + nodelist = self.render_tagged_iterable(node) - # TODO: "para" in compound.xsd is an empty tag; figure out what this is - # supposed to do - if isinstance(node,parser.Node_docRefTextType): + # TODO: "para" in compound.xsd is an empty tag; figure out what this + # is supposed to do for name,value in map(parser.tag_name_value,node): if name == 'para': nodelist.extend(self.render(value)) - refid = self.get_refid(node.refid) + refid = self.get_refid(node.refid or '') nodelist: list[Node] = [ addnodes.pending_xref( @@ -1620,7 +1626,7 @@ def visit_docheading(self, node: parser.Node_docHeadingType) -> list[Node]: Renders embedded headlines as emphasized text. Different heading levels are not supported. """ - nodelist = self.render_iterable(node) + nodelist = self.render_tagged_iterable(node) return [nodes.emphasis("", "", *nodelist)] @node_handler(parser.Node_docParaType) @@ -1635,7 +1641,7 @@ def visit_docpara(self, node: parser.Node_docParaType) -> list[Node]: nodelist = [] if self.context and self.context.directive_args[0] == "doxygenpage": - nodelist.extend(self.render_iterable(node)) + nodelist.extend(self.render_tagged_iterable(node)) else: contentNodeCands = [] for item in get_content(node): @@ -1721,12 +1727,12 @@ def visit_docimage(self, node: parser.Node_docImageType) -> list[Node]: def visit_docurllink(self, node: parser.Node_docURLLink) -> list[Node]: """Url Link Renderer""" - nodelist = self.render_iterable(node) + nodelist = self.render_tagged_iterable(node) return [nodes.reference("", "", refuri=node.url, *nodelist)] @tagged_node_handler(parser.Node_docMarkupType) def visit_docmarkup(self, tag: str, node: parser.Node_docMarkupType) -> list[Node]: - nodelist = self.render_iterable(node) + nodelist = self.render_tagged_iterable(node) creator: Type[TextElement] = nodes.inline if tag == "emphasis": creator = nodes.emphasis @@ -1759,7 +1765,7 @@ def visit_docsectN(self, node: parser.Node_docSect1Type | parser.Node_docSect2Ty section["ids"].append(self.get_refid(node.id)) section += nodes.title(node.title, node.title) section += self.create_doxygen_target(node) - section += self.render_iterable(node) + section += self.render_tagged_iterable(node) return [section] @node_handler(parser.Node_docSimpleSectType) @@ -1803,7 +1809,7 @@ def visit_docsimplesect(self, node: parser.Node_docSimpleSectType) -> list[Node] return [nodes.definition_list_item("", term, definition)] - visit_doctitle = node_handler(parser.Node_docTitleType)(render_iterable) + visit_doctitle = node_handler(parser.Node_docTitleType)(render_tagged_iterable) @node_handler(parser.Node_docFormulaType) def visit_docformula(self, node: parser.Node_docFormulaType) -> list[Node]: @@ -1841,13 +1847,16 @@ def visit_listing(self, node: parser.Node_listingType) -> list[Node]: # Add blank string at the start otherwise for some reason it renders # the pending_xref tags around the kind in plain text block = nodes.literal_block("", "", *nodelist) - domain = filetypes.get_pygments_alias(node.filename) or filetypes.get_extension(node.filename) + domain = filetypes.get_pygments_alias(node.filename or '') or filetypes.get_extension(node.filename or '') if domain: block["language"] = domain return [block] + + @node_handler(parser.Node_codelineType) + def visit_codeline(self, node: parser.Node_codelineType) -> list[Node]: + return 
self.render_iterable(node.highlight) - visit_codeline = node_handler(parser.Node_codelineType)(render_iterable) - visit_highlight = node_handler(parser.Node_highlightType)(render_iterable) + visit_highlight = node_handler(parser.Node_highlightType)(render_tagged_iterable) def _nested_inline_parse_with_titles(self, content, node) -> str: """ @@ -2069,7 +2078,7 @@ def visit_docvariablelist(self, node: parser.Node_docVariableListType) -> list[N @node_handler(parser.Node_docVarListEntryType) def visit_docvarlistentry(self, node: parser.Node_docVarListEntryType) -> list[Node]: - return self.render_iterable(node.term) + return self.render_tagged_iterable(node.term) @node_handler(parser.Node_docAnchorType) def visit_docanchor(self, node: parser.Node_docAnchorType) -> list[Node]: @@ -2132,9 +2141,9 @@ def merge_row_types(root, elem, elems): return [table] - visit_description = node_handler(parser.Node_descriptionType)(render_iterable) + visit_description = node_handler(parser.Node_descriptionType)(render_tagged_iterable) - visit_linkedtext = node_handler(parser.Node_linkedTextType)(render_iterable) + visit_linkedtext = node_handler(parser.Node_linkedTextType)(render_tagged_iterable) def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: dom = self.get_domain() @@ -2520,7 +2529,7 @@ def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: # we assume there is either 1 or 2 elements, if there is 2 the first is the # parameter direction assert len(paramName) == 1 or len(paramName) == 2, list(paramName) - thisName = self.render(paramName[-1]) + thisName = self.render_tagged(paramName[-1]) if len(nameNodes) != 0: if node.kind == parser.DoxParamListKind.exception: msg = "Doxygen \\exception commands with multiple names can not be" @@ -2535,7 +2544,7 @@ def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: if len(paramName) == 2: # note, each paramName node seems to have the same direction, # so just use the last one - dir = "".join(n.astext() for n in self.render(paramName[0])).strip() + dir = "".join(n.astext() for n in self.render_tagged(paramName[0])).strip() assert dir in ("[in]", "[out]", "[inout]"), ">" + dir + "<" parameterDirectionNodes = [nodes.strong(dir, dir), nodes.Text(" ", " ")] # it seems that Sphinx expects the name to be a single node, @@ -2615,10 +2624,27 @@ def visit_docdotfile(self, node) -> list[Node]: return [nodes.figure("", graph_node, caption_node)] return [graph_node] - @node_handler(parser.Node_graphType) - def visit_docgraph(self, node: parser.Node_graphType) -> list[Node]: + @tagged_node_handler(parser.Node_graphType) + def visit_docgraph(self, tag: str, node: parser.Node_graphType) -> list[Node]: """Create a graph (generated by doxygen - not user-defined) from XML using dot syntax.""" + + assert self.context + parent = self.context.node_stack[1].value + assert isinstance(parent,parser.Node_compounddefType) + + direction = "forward" + if tag == 'incdepgraph': + caption = f"Include dependency graph for {parent.compoundname}:" + elif tag == 'invincdepgraph': + direction = 'back' + caption = f"This graph shows which files directly or indirectly include {parent.compoundname}:" + elif tag == 'inheritancegraph': + caption = f"Inheritance diagram for {parent.compoundname}:" + else: + assert tag == 'collaborationgraph' + caption = f"Collaboration diagram for {parent.compoundname}:" + # use graphs' legend from doxygen (v1.9.1) # most colors can be changed via `graphviz_dot_args` in conf.py edge_colors = { @@ -2657,7 
+2683,7 @@ def visit_docgraph(self, node: parser.Node_graphType) -> list[Node]: for child_node in g_node.childnode: edge = f' "{g_node.id}"' edge += f' -> "{child_node.refid}" [' - edge += f"dir={node.direction} " + edge += f"dir={direction} " # edge labels don't appear in XML (bug?); use tooltip in meantime edge += 'tooltip="%s"' % child_node.relation.value if child_node.relation.value in edge_colors.keys(): @@ -2673,7 +2699,6 @@ def visit_docgraph(self, node: parser.Node_graphType) -> list[Node]: graph_node["code"] = dot graph_node["align"] = "center" graph_node["options"] = {} - caption = node.caption # if caption is first node in a figure, then everything that follows is # considered a caption. Use a paragraph followed by a figure to center the # graph. This may have illegible side effects for very large graphs. @@ -2753,20 +2778,24 @@ def render_string(self, node: str) -> list[Node]: return [nodes.Text(node)] return [] - def render_tagged(self, item: parser.TaggedValue[str,parser.Node] | str) -> list[Node]: + def render_tagged(self, item: parser.TaggedValue[str,parser.NodeOrValue] | str) -> list[Node]: if isinstance(item,str): return self.render_string(item) h = self.tagged_node_handlers.get(type(item.value)) if h is not None: assert self.context is not None - with WithContext(self, self.context.create_child_context(item.value)): + with WithContext(self, self.context.create_child_context(item.value, item.name)): if not self.filter_.allow(self.context.node_stack): return [] - return h(self,item.name,item.value) + return h(self,item.name, item.value) return self.render(item.value) - def render(self, node: parser.Node, context: RenderContext | None = None) -> list[Node]: + def render(self, node: parser.NodeOrValue, context: RenderContext | None = None, tag: str | None = None) -> list[Node]: + # the filters discriminate based on the tag name for Node_refType + # instances + assert tag is not None or not isinstance(node,parser.Node_refType) + if context is None: assert self.context is not None - context = self.context.create_child_context(node) + context = self.context.create_child_context(node,tag) with WithContext(self, context): assert self.context is not None result: list[Node] = [] @@ -2777,7 +2806,7 @@ def render(self, node: parser.Node, context: RenderContext | None = None) -> lis result = method(self, node) return result - def render_optional(self, node) -> list[Node]: + def render_optional(self, node, tag: str | None = None) -> list[Node]: """Render a node that can be None.""" return self.render(node) if node else [] diff --git a/tests/data/arange.xml b/tests/data/arange.xml index 8f687786..3abdfbbf 100644 --- a/tests/data/arange.xml +++ b/tests/data/arange.xml @@ -16,7 +16,7 @@ options {} - + Tensor @@ -43,7 +43,7 @@ c10::optional< bool > pin_memory - + Tensor @@ -63,7 +63,7 @@ options {} - + Tensor @@ -94,7 +94,7 @@ c10::optional< bool > pin_memory - + Tensor @@ -118,7 +118,7 @@ options {} - + Tensor @@ -153,7 +153,7 @@ c10::optional< bool > pin_memory - + diff --git a/tests/data/ellipsis.xml b/tests/data/ellipsis.xml index 68861277..fd243cbd 100644 --- a/tests/data/ellipsis.xml +++ b/tests/data/ellipsis.xml @@ -18,7 +18,7 @@ ... 
- + diff --git a/tests/test_renderer.py b/tests/test_renderer.py index 620ecef7..83117c19 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -4,7 +4,7 @@ import sphinx.locale import sphinx.addnodes import sphinx.environment -from breathe import parser +from breathe import parser, renderer from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.filter import OpenFilter import docutils.parsers.rst @@ -23,16 +23,7 @@ 'id': '', 'prot': parser.DoxProtectionKind.public, 'static': False, - 'location': parser.Node_locationType( - bodyend = 0, - bodyfile = '', - bodystart = 0, - column = 0, - declcolumn = 0, - declfile = '', - declline = 0, - file = '', - line = 0) + 'location': parser.Node_locationType(file = '', line = 0) } @pytest.fixture(scope="function") @@ -74,7 +65,7 @@ def __init__(self, app): env.temp_data["docname"] = "mock-doc" env.temp_data["breathe_project_info_factory"] = ProjectInfoFactory(app) env.temp_data["breathe_parser_factory"] = DoxygenParserFactory(app) - settings = frontend.get_default_settings(docutils.parsers.rst.Parser) # type: ignore + settings = frontend.get_default_settings(docutils.parsers.rst.Parser) settings.env = env self.document = utils.new_document("", settings) @@ -133,7 +124,7 @@ def __init__(self, app, node_stack, domain=None, options=[]): self.child = None self.mask_factory = MockMaskFactory() - def create_child_context(self, attribute): + def create_child_context(self, attribute, tag): return self @@ -516,7 +507,7 @@ def get_directive(app): return DoxygenFunctionDirective(*cls_args) -def get_matches(datafile): +def get_matches(datafile) -> tuple[list[str], list[list[renderer.TaggedNode]]]: argsstrings = [] with open(os.path.join(os.path.dirname(__file__), "data", datafile)) as fid: xml = fid.read() @@ -526,7 +517,7 @@ def get_matches(datafile): sectiondef = doc.value.compounddef[0].sectiondef[0] for child in sectiondef.memberdef: if child.argsstring: argsstrings.append(child.argsstring) - matches = [[m, sectiondef] for m in sectiondef.memberdef] + matches = [[renderer.TaggedNode(None, m), renderer.TaggedNode(None, sectiondef)] for m in sectiondef.memberdef] return argsstrings, matches diff --git a/tests/test_utils.py b/tests/test_utils.py index 37228044..6ecd8dee 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -13,7 +13,7 @@ def test_param_decl(self): - + x int @@ -45,7 +45,7 @@ def test_param_decl(self): r [3] - + diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index b7670c5b..ec4d6520 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -113,6 +113,10 @@ def fields(self) -> Iterable[TypeRef|Attribute]: yield from self.attributes.values() yield from self.children.values() + @property + def direct_field_count(self): + return len(self.attributes) + len(self.children) + def all_fields(self) -> Iterable[TypeRef|Attribute]: for b in self.bases: if isinstance(b,ElementType): diff --git a/xml_parser_generator/module_template.c b/xml_parser_generator/module_template.c index 80da77a0..276a5b91 100644 --- a/xml_parser_generator/module_template.c +++ b/xml_parser_generator/module_template.c @@ -41,7 +41,8 @@ is broken into chunks. 
*/ enum { - CLASS_FROZEN_LIST = 0, + CLASS_NODE = 0, /* important: module_exec() assumes this comes before CLASS_FROZEN_LIST */ + CLASS_FROZEN_LIST, CLASS_FROZEN_LIST_ITR, CLASS_TAGGED_VALUE, CLASS_PARSE_ERROR, @@ -682,6 +683,11 @@ static const char *py_field_names[] = { //% endfor }; +typedef struct { + unsigned int length; + const py_field_type *fields; +} field_set; + //% macro hash_lookup(f_name,hash_info,count,names,type,default) static {$ type $} {$ f_name $}(const char *key) { //% if hash_info @@ -946,6 +952,17 @@ enum { //% endif }; +//% if type is has_fields +//% if type.direct_field_count +static const py_field_type FIELD_PY_INDICES__{$ type $}[] = { +//% for f in type.fields() + PY_FIELD__{$ f.py_name $}, +//% endfor +}; +//% endif +static Py_ssize_t assign_field_name_tuple__{$ type $}(PyObject *dest,PyObject **names,Py_ssize_t start_i); +//% endif + //% if type is has_fields static PyMemberDef node_class_members__{$ type $}[] = { //% for f in type.fields() @@ -984,6 +1001,7 @@ static int node_class_new_fields_end__{$ type $}(module_state *state,PyObject ** //% endif //% if type is has_attributes static int node_class_attr__{$ type $}(parse_state*,{$ 'PyObject**,' if type is has_fields $}attribute_type,const XML_Char**); +static int node_class_attr_end__{$ type $}(parse_state*,PyObject**); //% endif //% if type is has_children_or_content static int node_class_child__{$ type $}(parse_state*,{$ 'PyObject**,' if type is has_fields $}element_type,const XML_Char**); @@ -1060,6 +1078,10 @@ static int warn_duplicate_attribute(parse_state *state,const char *name) { return SET_PARSE_WARNING_FMT(state,"duplicate attribute \"%s\"",name); } +static void raise_missing_attribute_error(parse_state *state,const char *name) { + set_parse_error_format(state,"missing \"%s\" attribute",name); +} + static void raise_duplicate_element_error(parse_state *state,const char *name) { set_parse_error_format(state,"\"%s\" cannot appear more than once in this context",name); } @@ -1289,6 +1311,23 @@ static int node_set_py_field_frozen_list(module_state *state,PyObject **field,Py //% for type in types //% if type is element //% if type|field_count +static Py_ssize_t assign_field_name_tuple__{$ type $}(PyObject *dest,PyObject **names,Py_ssize_t start_i) { +//% if type.direct_field_count + int i; +//% endif +//% for b in type.bases if b|field_count + start_i = assign_field_name_tuple__{$ b $}(dest,start_i); +//% endfor +//% if type.direct_field_count + for(i = 0; i < FIELD_COUNT__{$ type $} - BASE_FIELD_OFFSET__{$ type $}; ++i, ++start_i) { + PyObject *name = names[FIELD_PY_INDICES__{$ type $}[i]]; + PyTuple_SetItem(dest,start_i,name); + Py_INCREF(name); + } +//% endif + return start_i; +} + static void node_class_new_set_fields__{$ type $}(PyObject **fields,PyObject *args,Py_ssize_t start_i) { //% for b in type.bases if b|field_count node_class_new_set_fields__{$ b $}(fields + BASE_FIELD_OFFSET__{$ type $}__{$ b $},args,start_i + BASE_FIELD_OFFSET__{$ type $}__{$ b $}); @@ -1376,6 +1415,23 @@ static int node_class_attr__{$ type $}(parse_state *state,{$ 'PyObject **fields, return 0; } } +static int node_class_attr_end__{$ type $}(parse_state *state, PyObject **fields) { +//% for b in type.bases if b is has_attributes + if(node_class_attr_end__{$ b $}(state),fields + BASE_FIELD_OFFSET__{$ type $}__{$ b $}) return -1; +//% endfor +//% for ref in type|attributes + if(fields[FIELD__{$ type $}__{$ ref.py_name $}] == NULL) { +//% if ref.optional + fields[FIELD__{$ type $}__{$ ref.py_name $}] = Py_None; + 
Py_INCREF(Py_None); +//% else + raise_missing_attribute_error(state,"{$ ref.name $}"); + return -1; +//% endif + } +//% endfor + return 0; +} //% endif //% if type is has_children_or_content static int node_class_child__{$ type $}(parse_state *state,{$ 'PyObject **fields,' if type is has_fields $}element_type e_index,const XML_Char **attr) { @@ -1536,9 +1592,9 @@ static int node_class_start__{$ type $}(parse_state *state,PyObject **dest,const //% endif //% endfor -//% if type|attributes|length or type.other_attr == OtherAttrAction.error +//% if type is has_attributes or type.other_attr == OtherAttrAction.error for(; *attr != NULL; attr += 2) { -//% if type|attributes|length +//% if type is has_attributes int r; attribute_type attr_index = attribute_lookup(attr[0]); r = node_class_attr__{$ type $}(state,n->fields,attr_index,attr); @@ -1552,6 +1608,9 @@ static int node_class_start__{$ type $}(parse_state *state,PyObject **dest,const //% endif } //% endif +//% if type is has_attributes + if(node_class_attr_end__{$ type $}(state,n->fields)) return -1; +//% endif cb = push_callbacks(state); if(cb == NULL) return -1; @@ -1702,19 +1761,34 @@ static int toplevel_start(parse_state *state,const XML_Char *child_name,const XM } } +typedef enum { + CLASS_TYPE_OTHER, + CLASS_TYPE_NODE_SUB, + CLASS_TYPE_LIST_NODE_SUB +} class_type; + typedef struct { PyType_Spec spec; - unsigned char list_base; -} spec_and_is_list; - -static spec_and_is_list class_specs[] = { - {{FULL_MODULE_STR ".FrozenList",sizeof(frozen_list),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,frozen_list_slots},0}, - {{FULL_MODULE_STR ".FrozenListItr",sizeof(frozen_list_itr),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,frozen_list_itr_slots},0}, - {{FULL_MODULE_STR ".TaggedValue",sizeof(tagged_value),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,tagged_value_slots},0}, - {{FULL_MODULE_STR ".ParseError",0,0,Py_TPFLAGS_DEFAULT,parse_error_slots},0}, - {{FULL_MODULE_STR ".ParseWarning",0,0,Py_TPFLAGS_DEFAULT,parse_warning_slots},0}, + Py_ssize_t field_count; + Py_ssize_t (*assign_field_name_tuple)(PyObject*,PyObject**,Py_ssize_t); + class_type type; +} spec_details; + +static PyType_Slot empty_slots[1] = {{0,NULL}}; + +static spec_details class_specs[] = { + {{FULL_MODULE_STR ".Node",0,0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,empty_slots},0,NULL,CLASS_TYPE_OTHER}, + {{FULL_MODULE_STR ".FrozenList",sizeof(frozen_list),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,frozen_list_slots},0,NULL,CLASS_TYPE_OTHER}, + {{FULL_MODULE_STR ".FrozenListItr",sizeof(frozen_list_itr),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,frozen_list_itr_slots},0,NULL,CLASS_TYPE_OTHER}, + {{FULL_MODULE_STR ".TaggedValue",sizeof(tagged_value),0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,tagged_value_slots},0,NULL,CLASS_TYPE_OTHER}, + {{FULL_MODULE_STR ".ParseError",0,0,Py_TPFLAGS_DEFAULT,parse_error_slots},0,NULL,CLASS_TYPE_OTHER}, + {{FULL_MODULE_STR ".ParseWarning",0,0,Py_TPFLAGS_DEFAULT,parse_warning_slots},0,NULL,CLASS_TYPE_OTHER}, //% for type in types|select('used_directly') - {{FULL_MODULE_STR ".Node_{$ type $}",offsetof(node_{$ common_affix(type) $}_common,fields){% if type is has_fields %} + sizeof(PyObject*)*FIELD_COUNT__{$ type $}{% endif %},0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,node_class_slots__{$ type $}},{$ '1' if type is list_e else '0' $}}, + {{FULL_MODULE_STR ".Node_{$ type $}",offsetof(node_{$ common_affix(type) $}_common,fields){% + if type is has_fields %} + 
sizeof(PyObject*)*FIELD_COUNT__{$ type $}{% endif + %},0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,node_class_slots__{$ type $}},FIELD_COUNT__{$ type $},{$ + 'assign_field_name_tuple__'~type if type is has_fields else 'NULL' + $},CLASS_TYPE_{$ 'LIST_' if type is list_e $}NODE_SUB}, //% endfor //% for type in types|select('content_tuple') {{FULL_MODULE_STR ".ListItem_{$ type $}",offsetof(tuple_item,fields) + sizeof(PyObject*)*TUPLE_ITEM_FIELD_COUNT__{$ type $},0,Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC,tuple_item_slots__{$ type $}},0}, @@ -2022,11 +2096,16 @@ static int create_enum( } static int module_exec(PyObject *module) { + PyObject *field_name_objs[PY_FIELD_COUNT]; PyObject *enum_base=NULL; PyObject *frozen_list_bases=NULL; - size_t i=0; + PyObject *node_bases=NULL; + PyObject *fields_str=NULL; + size_t i; + size_t class_i=0; size_t tu_i=0; size_t char_i=0; + size_t field_i=0; module_state *state = PyModule_GetState(module); for(; tu_iclasses[CLASS_NODE] != NULL); + state->classes[i] = (PyTypeObject*)PyType_FromSpec(&class_specs[i].spec); + if(state->classes[i] == NULL) goto error; + ++class_i; - frozen_list_bases = PyTuple_New(1); + frozen_list_bases = PyTuple_New(2); if(frozen_list_bases == NULL) goto error; PyTuple_SetItem(frozen_list_bases,0,(PyObject*)state->classes[i]); Py_INCREF(state->classes[i]); + PyTuple_SetItem(frozen_list_bases,1,(PyObject*)state->classes[CLASS_NODE]); + Py_INCREF(state->classes[CLASS_NODE]); + + node_bases = PyTuple_New(1); + if(node_bases == NULL) goto error; + PyTuple_SetItem(node_bases,0,(PyObject*)state->classes[CLASS_NODE]); + Py_INCREF(state->classes[CLASS_NODE]); } else if(i == CLASS_PARSE_ERROR) { - state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,PyExc_RuntimeError); + PyObject *bases = PyTuple_New(1); + if(bases == NULL) goto error; + PyTuple_SetItem(bases,0,PyExc_RuntimeError); + Py_INCREF(PyExc_RuntimeError); + state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,bases); + Py_DECREF(bases); + if(state->classes[i] == NULL) goto error; + ++class_i; } else if(i == CLASS_PARSE_WARNING) { - state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,PyExc_UserWarning); - } else if(class_specs[i].list_base) { - assert(frozen_list_bases != NULL); - state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,frozen_list_bases); - } else { + PyObject *bases = PyTuple_New(1); + if(bases == NULL) goto error; + PyTuple_SetItem(bases,0,PyExc_UserWarning); + Py_INCREF(PyExc_UserWarning); + state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases(&class_specs[i].spec,bases); + Py_DECREF(bases); + if(state->classes[i] == NULL) goto error; + ++class_i; + } else if(class_specs[i].type == CLASS_TYPE_OTHER) { state->classes[i] = (PyTypeObject*)PyType_FromSpec(&class_specs[i].spec); + if(state->classes[i] == NULL) goto error; + ++class_i; + } else { + PyObject *field_tuple; + int r=0; + + assert(frozen_list_bases != NULL && node_bases != NULL); + state->classes[i] = (PyTypeObject*)PyType_FromSpecWithBases( + &class_specs[i].spec, + class_specs[i].type == CLASS_TYPE_LIST_NODE_SUB ? 
frozen_list_bases : node_bases); + + if(state->classes[i] == NULL) goto error; + ++class_i; + + assert((class_specs[i].field_count == 0) == (class_specs[i].assign_field_name_tuple == NULL)); + field_tuple = PyTuple_New(class_specs[i].field_count); + if(field_tuple == NULL) goto error; + if(class_specs[i].field_count) { +#ifndef NDEBUG + Py_ssize_t count = class_specs[i].assign_field_name_tuple(field_tuple,field_name_objs,0); + assert(count == class_specs[i].field_count); +#else + class_specs[i].assign_field_name_tuple(field_tuple,field_name_objs,0); +#endif + } + r = PyObject_SetAttr((PyObject*)state->classes[i],fields_str,field_tuple); + Py_DECREF(field_tuple); + if(r < 0) goto error; } - if(state->classes[i] == NULL) goto error; + if(PyModule_AddObject(module,class_specs[i].spec.name + sizeof(FULL_MODULE_STR),(PyObject*)state->classes[i])) { - ++i; goto error; }; Py_INCREF(state->classes[i]); } Py_DECREF(frozen_list_bases); + Py_DECREF(node_bases); + Py_DECREF(fields_str); + decref_array(field_name_objs,field_i); return 0; error: + Py_XDECREF(fields_str); Py_XDECREF(enum_base); + Py_XDECREF(node_bases); Py_XDECREF(frozen_list_bases); //% for type in types|select('enumeration_t') if(state->enum_values__{$ type $}[0] != NULL) decref_array(state->enum_values__{$ type $},ENUM_VALUE_COUNT__{$ type $}); //% endfor - decref_array((PyObject**)state->classes,i); + decref_array(field_name_objs,field_i); + decref_array((PyObject**)state->classes,class_i); decref_array(state->tag_names,tu_i); decref_array(state->char_objects,char_i); state->classes[0] = NULL; diff --git a/xml_parser_generator/schema.json b/xml_parser_generator/schema.json index 45ede889..06a928f4 100644 --- a/xml_parser_generator/schema.json +++ b/xml_parser_generator/schema.json @@ -7,7 +7,6 @@ "DoxygenTypeIndex": { "kind": "tag_only_element", "attributes": { - "lang": {"type": "#string"}, "version": {"type": "#string"} }, "other_attr": "ignore", @@ -29,7 +28,6 @@ "DoxygenType": { "kind": "tag_only_element", "attributes": { - "lang": {"type": "#string"}, "version": {"type": "#string"} }, "other_attr": "ignore", @@ -46,7 +44,7 @@ "inline": {"type": "#DoxBool", "optional": true}, "kind": {"type": "DoxCompoundKind"}, "language": {"type": "DoxLanguage", "optional": true}, - "prot": {"type": "DoxProtectionKind"}, + "prot": {"type": "DoxProtectionKind", "optional": true}, "sealed": {"type": "#DoxBool", "optional": true} }, "children": { @@ -140,7 +138,7 @@ "listingType": { "kind": "tag_only_element", "attributes": { - "filename": {"type": "#string"} + "filename": {"type": "#string", "optional": true} }, "children": { "codeline": {"type": "codelineType", "is_list": true, "min_items": 0} @@ -149,15 +147,15 @@ "locationType": { "kind": "tag_only_element", "attributes": { - "bodyend": {"type": "#integer"}, - "bodyfile": {"type": "#string"}, - "bodystart": {"type": "#integer"}, - "column": {"type": "#integer"}, - "declcolumn": {"type": "#integer"}, - "declfile": {"type": "#string"}, - "declline": {"type": "#integer"}, + "bodyend": {"type": "#integer", "optional": true}, + "bodyfile": {"type": "#string", "optional": true}, + "bodystart": {"type": "#integer", "optional": true}, + "column": {"type": "#integer", "optional": true}, + "declcolumn": {"type": "#integer", "optional": true}, + "declfile": {"type": "#string", "optional": true}, + "declline": {"type": "#integer", "optional": true}, "file": {"type": "#string"}, - "line": {"type": "#integer"} + "line": {"type": "#integer", "optional": true} } }, "listofallmembersType": { @@ -169,7 +167,7 
@@ "memberRefType": { "kind": "tag_only_element", "attributes": { - "ambiguityscope": {"type": "#string"}, + "ambiguityscope": {"type": "#string", "optional": true}, "prot": {"type": "DoxProtectionKind"}, "refid": {"type": "#string"}, "virt": {"type": "DoxVirtualKind"} @@ -292,7 +290,7 @@ "kind": "union_list_element", "allow_text": true, "attributes": { - "compoundref": {"type": "#string"}, + "compoundref": {"type": "#string", "optional": true}, "endline": {"type": "#integer"}, "refid": {"type": "#string"}, "startline": {"type": "#integer"} @@ -335,7 +333,7 @@ "linkType": { "kind": "tag_only_element", "attributes": { - "external": {"type": "#string"}, + "external": {"type": "#string", "optional": true}, "refid": {"type": "#string"} } }, @@ -516,7 +514,7 @@ "kind": "union_list_element", "allow_text": true, "attributes": { - "direction": {"type": "DoxParamDir"} + "direction": {"type": "DoxParamDir", "optional": true} }, "content": { "ref": "refTextType" @@ -534,7 +532,7 @@ "kind": "union_list_element", "allow_text": true, "attributes": { - "refid": {"type": "#string"}, + "refid": {"type": "#string", "optional": true}, "local": {"type": "#DoxBool"} } }, @@ -1004,7 +1002,7 @@ "attributes": { "refid": {"type": "#string"}, "kindref": {"type": "#string"}, - "external": {"type": "#string"} + "external": {"type": "#string", "optional": true} } }, "docHeadingType": { @@ -1018,6 +1016,7 @@ "kind": "union_list_element", "bases": ["docTitleCmdGroup"], "attributes": { + "name": {"type": "#string", "optional": true}, "width": {"type": "#string", "optional": true}, "height": {"type": "#string", "optional": true} } diff --git a/xml_parser_generator/stubs_template.pyi b/xml_parser_generator/stubs_template.pyi index 050fcac2..c101eff6 100644 --- a/xml_parser_generator/stubs_template.pyi +++ b/xml_parser_generator/stubs_template.pyi @@ -1,5 +1,5 @@ import enum -from typing import Generic,Literal,overload,Protocol,SupportsIndex,TypeVar +from typing import ClassVar, Generic, Literal, overload, Protocol, Self, SupportsIndex, TypeVar from collections.abc import Iterable T = TypeVar('T',covariant=True) @@ -9,7 +9,7 @@ class SupportsRead(Protocol): def read(self, length: int, /) -> bytes | bytearray: ... class FrozenListItr(Generic[T]): - def __iter__(self) -> FrozenListItr: ... + def __iter__(self) -> Self: ... def __next__(self) -> T: ... class FrozenList(Generic[T]): @@ -35,6 +35,9 @@ class TaggedValue(Generic[T, U]): @overload def __getitem__(self, i: SupportsIndex) -> T | U: ... +class Node: + _fields: ClassVar[tuple[str, ...]] + class ParseError(RuntimeError): @property def message(self) -> str: ... 
@@ -106,7 +109,7 @@ ListItem_{$ type $} = ( {$ "invalid content type"|error $} //% endif //% if type is used_directly -class Node_{$ type $}{$ '(FrozenList['~list_item_type~'])' if type is list_e $}: +class Node_{$ type $}({$ 'FrozenList['~list_item_type~'], ' if type is list_e $}Node): {$ emit_fields(type) $} def __init__(self{$ ', __items: Iterable['~list_item_type~'], /' if type is list_e $} {%- if type|field_count -%}, * @@ -124,9 +127,3 @@ class {$ type $}(enum.Enum): {$ type $} = Literal[{% for c in type.values %}{$ "'"~c~"'" $}{$ ',' if not loop.last $}{% endfor %}] //% endif //% endfor - -Node = ( -//% for type in types|map(attribute='py_name')|sort|unique - {$ '| ' if not loop.first $}{$ type $} -//% endfor -) From eee9d2275416fe61f93e14b842899755e12a5ecb Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sun, 26 Nov 2023 17:02:06 -0500 Subject: [PATCH 21/65] Appears to be working now --- breathe/renderer/filter.py | 11 ++-- breathe/renderer/sphinxrenderer.py | 96 +++++++++++++++++------------- 2 files changed, 58 insertions(+), 49 deletions(-) diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 97413b82..00c0ea45 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -358,9 +358,8 @@ def __call__(self, node_stack): class NamespaceAccessor(Accessor): def __call__(self, node_stack): - r = [] - r.extend(self.selector(node_stack).value.innernamespace) # type: ignore - r.extend(self.selector(node_stack).value.innerclass) # type: ignore + r = set(''.join(ns) for ns in self.selector(node_stack).value.innernamespace) # type: ignore + r.update(''.join(ns) for ns in self.selector(node_stack).value.innerclass) # type: ignore return r @@ -881,11 +880,11 @@ def create_outline_filter(self, options: dict[str, Any]) -> Filter: return OpenFilter() def create_file_filter(self, filename: str, options: dict[str, Any]) -> Filter: - valid_names: list[str] = [] + valid_names: set[str] = set() def gather_namespaces(node: parser.Node_compounddefType): - valid_names.extend(''.join(ns) for ns in node.innernamespace) - valid_names.extend(''.join(ns) for ns in node.innerclass) + valid_names.update(''.join(ns) for ns in node.innernamespace) + valid_names.update(''.join(ns) for ns in node.innerclass) return False filter_ = AndFilter( diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index 723a72ea..31a51dac 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -1262,30 +1262,30 @@ def visit_compound( node: HasRefID, render_empty_node=True, *, - get_node_info: Callable[[parser.Node_DoxygenType], tuple[str, parser.CompoundKind]] | None = None, + get_node_info: Callable[[parser.Node_DoxygenType], tuple[str, parser.DoxCompoundKind]] | None = None, render_signature: Callable[ - [parser.Node_DoxygenType,Sequence[Element],str,parser.CompoundKind], + [parser.Node_DoxygenType,Sequence[Element],str,parser.DoxCompoundKind], tuple[list[Node],addnodes.desc_content]] | None = None) -> list[Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) assert len(file_data.compounddef) == 1 - def def_get_node_info(file_data): + def def_get_node_info(file_data) -> tuple[str, parser.DoxCompoundKind]: assert isinstance(node,parser.Node_CompoundType) - return node.name, node.kind + return node.name, parser.DoxCompoundKind(node.kind.value) if get_node_info is None: get_node_info = def_get_node_info name, kind = get_node_info(file_data) - if kind == 
parser.CompoundKind.union: + if kind == parser.DoxCompoundKind.union: dom = self.get_domain() assert not dom or dom in ("c", "cpp") return self.visit_union(node) - elif kind in (parser.CompoundKind.struct, parser.CompoundKind.class_, parser.CompoundKind.interface): + elif kind in (parser.DoxCompoundKind.struct, parser.DoxCompoundKind.class_, parser.DoxCompoundKind.interface): dom = self.get_domain() if not dom or dom in ("c", "cpp", "py", "cs"): return self.visit_class(node) - elif kind == parser.CompoundKind.namespace: + elif kind == parser.DoxCompoundKind.namespace: dom = self.get_domain() if not dom or dom in ("c", "cpp", "py", "cs"): return self.visit_namespace(node) @@ -1302,7 +1302,7 @@ def def_render_signature( file_data: parser.Node_DoxygenType, doxygen_target, name, - kind: parser.CompoundKind) -> tuple[list[Node],addnodes.desc_content]: + kind: parser.DoxCompoundKind) -> tuple[list[Node],addnodes.desc_content]: # Defer to domains specific directive. assert len(file_data.compounddef) == 1 @@ -1310,7 +1310,7 @@ def def_render_signature( arg = "%s %s" % (templatePrefix, self.get_fully_qualified_name()) # add base classes - if kind in (parser.CompoundKind.class_, parser.CompoundKind.struct): + if kind in (parser.DoxCompoundKind.class_, parser.DoxCompoundKind.struct): bs = [] for base in file_data.compounddef[0].basecompoundref: b = [] @@ -1593,6 +1593,8 @@ def visit_sectiondef(self, node: parser.Node_sectiondefType) -> list[Node]: @node_handler(parser.Node_docRefTextType) @node_handler(parser.Node_refTextType) def visit_docreftext(self, node: parser.Node_docRefTextType | parser.Node_incType | parser.Node_refTextType) -> list[Node]: + nodelist: list[Node] + if isinstance(node,parser.Node_incType): nodelist = self.render_iterable(node) else: @@ -1606,7 +1608,8 @@ def visit_docreftext(self, node: parser.Node_docRefTextType | parser.Node_incTyp refid = self.get_refid(node.refid or '') - nodelist: list[Node] = [ + assert nodelist + nodelist = [ addnodes.pending_xref( "", reftype="ref", @@ -1882,10 +1885,10 @@ def _nested_inline_parse_with_titles(self, content, node) -> str: self.state.memo.title_styles = surrounding_title_styles self.state.memo.section_level = surrounding_section_level - def visit_verbatim(self, node) -> list[Node]: - if not node.text.strip().startswith("embed:rst"): + def visit_verbatim(self, node: str) -> list[Node]: + if not node.strip().startswith("embed:rst"): # Remove trailing new lines. Purely subjective call from viewing results - text = node.text.rstrip() + text = node.rstrip() # Handle has a preformatted text return [nodes.literal_block(text, text)] @@ -1897,36 +1900,36 @@ def visit_verbatim(self, node) -> list[Node]: # However This would have a side-effect for any users who have an rst-block # consisting of a simple bullet list. # For now we just look for an extended embed tag - if node.text.strip().startswith("embed:rst:leading-asterisk"): - lines = node.text.splitlines() + if node.strip().startswith("embed:rst:leading-asterisk"): + lines = node.splitlines() # Replace the first * on each line with a blank space lines = map(lambda text: text.replace("*", " ", 1), lines) - node.text = "\n".join(lines) + node = "\n".join(lines) # do we need to strip leading ///? 
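        # Illustrative note (an assumed input, not taken from the patch): the
        # branch below expects text that still carries its comment markers, e.g.
        #   /// embed:rst:leading-slashes
        #   ///    .. note:: Some reStructuredText content.
        # It rewrites the leading "///" on each line to spaces so the block can
        # be dedented and parsed as plain reStructuredText further down.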
- elif node.text.strip().startswith("embed:rst:leading-slashes"): - lines = node.text.splitlines() + elif node.strip().startswith("embed:rst:leading-slashes"): + lines = node.splitlines() # Replace the /// on each line with three blank spaces lines = map(lambda text: text.replace("///", " ", 1), lines) - node.text = "\n".join(lines) + node = "\n".join(lines) - elif node.text.strip().startswith("embed:rst:inline"): + elif node.strip().startswith("embed:rst:inline"): # Inline all text inside the verbatim - node.text = "".join(node.text.splitlines()) + node = "".join(node.splitlines()) is_inline = True if is_inline: - text = node.text.replace("embed:rst:inline", "", 1) + node = node.replace("embed:rst:inline", "", 1) else: # Remove the first line which is "embed:rst[:leading-asterisk]" - text = "\n".join(node.text.split("\n")[1:]) + node = "\n".join(node.split("\n")[1:]) # Remove starting whitespace - text = textwrap.dedent(text) + node = textwrap.dedent(node) # Inspired by autodoc.py in Sphinx rst = StringList() - for line in text.split("\n"): + for line in node.split("\n"): rst.append(line, "") # Parent node for the generated node subtree @@ -1962,7 +1965,7 @@ def visit_inc(self, node: parser.Node_incType) -> list[Node]: @node_handler(parser.Node_refType) def visit_ref(self, node: parser.Node_refType) -> list[Node]: - def get_node_info(file_data): + def get_node_info(file_data: parser.Node_DoxygenType): name = ''.join(node) name = name.rsplit("::", 1)[-1] assert len(file_data.compounddef) == 1 @@ -2017,6 +2020,7 @@ def visit_compoundref(self, node: parser.Node_compoundRefType) -> list[Node]: if node.refid is not None: refid = self.get_refid(node.refid) if refid is not None: + assert nodelist nodelist = [ addnodes.pending_xref( "", @@ -2168,7 +2172,7 @@ def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: elements.append("static") if node.inline: elements.append("inline") - if node.kind == "friend": + if node.kind == parser.DoxMemberKind.friend: elements.append("friend") if node.virt in (parser.DoxVirtualKind.virtual, parser.DoxVirtualKind.pure_virtual): elements.append("virtual") @@ -2566,11 +2570,17 @@ def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: fieldList += field return [fieldList] - def visit_docdot(self, node) -> list[Node]: + @node_handler(parser.Node_docDotMscType) + def visit_docdot(self, node: parser.Node_docDotMscType) -> list[Node]: """Translate node from doxygen's dot command to sphinx's graphviz directive.""" graph_node = graphviz() - if node.content_ and node.content_[0].getValue().rstrip("\n"): - graph_node["code"] = node.content_[0].getValue() + str_value = '' + if len(node): + val = node[0] + assert isinstance(val,str) + str_value = val + if str_value.rstrip("\n"): + graph_node["code"] = str_value else: graph_node["code"] = "" # triggers another warning from sphinx.ext.graphviz self.state.document.reporter.warning( @@ -2585,10 +2595,11 @@ def visit_docdot(self, node) -> list[Node]: return [nodes.figure("", graph_node, caption_node)] return [graph_node] - def visit_docdotfile(self, node) -> list[Node]: + @node_handler(parser.Node_docImageFileType) + def visit_docdotfile(self, node: parser.Node_docImageFileType) -> list[Node]: """Translate node from doxygen's dotfile command to sphinx's graphviz directive.""" dotcode = "" - dot_file_path = node.name # type: str + dot_file_path: str = node.name or '' # Doxygen v1.9.3+ uses a relative path to specify the dot file. # Previously, Doxygen used an absolute path. 
# This relative path is with respect to the XML_OUTPUT path. @@ -2617,7 +2628,7 @@ def visit_docdotfile(self, node) -> list[Node]: graph_node = graphviz() graph_node["code"] = dotcode graph_node["options"] = {"docname": dot_file_path} - caption = "" if not node.content_ else node.content_[0].getValue() + caption = '' if len(node) == 0 else parser.tag_name_value(node[0])[1] if caption: caption_node = nodes.caption(caption, "") caption_node += nodes.Text(caption) @@ -2742,11 +2753,11 @@ def dispatch_memberdef(self, node: parser.Node_memberdefType) -> list[Node]: return self.visit_friendclass(node) return self.render_declaration(node, update_signature=self.update_signature) - #methods: dict[str, Callable[[SphinxRenderer, Any], list[Node]]] = { - # "verbatim": visit_verbatim, - # "docdotfile": visit_docdotfile, - # "docdot": visit_docdot, - #} + @tagged_node_handler(str) + def visit_string(self, tag: str, node: str) -> list[Node]: + if tag == 'verbatim': + return self.visit_verbatim(node) + return self.render_string(node) @node_handler(str) def render_string(self, node: str) -> list[Node]: @@ -2789,10 +2800,6 @@ def render_tagged(self, item: parser.TaggedValue[str,parser.NodeOrValue] | str) return self.render(item.value) def render(self, node: parser.NodeOrValue, context: RenderContext | None = None, tag: str | None = None) -> list[Node]: - # the filters discriminate based on the tag name for Node_refType - # instances - assert tag is not None or not isinstance(node,parser.Node_refType) - if context is None: assert self.context is not None context = self.context.create_child_context(node,tag) @@ -2802,7 +2809,10 @@ def render(self, node: parser.NodeOrValue, context: RenderContext | None = None, if not self.filter_.allow(self.context.node_stack): pass else: - method = self.node_handlers.get(type(node), SphinxRenderer.visit_unknown) + method = self.node_handlers.get(type(node)) + if method is None: + assert type(node) not in self.tagged_node_handlers + method = SphinxRenderer.visit_unknown result = method(self, node) return result From 58aadd47cec58f0b30edaca42238434bd6093f68 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 29 Nov 2023 18:03:54 -0500 Subject: [PATCH 22/65] Replaced filter objects with plain functions --- .gitignore | 4 + README.rst | 2 +- breathe/directives/__init__.py | 10 +- breathe/directives/class_like.py | 28 +- breathe/directives/content_block.py | 49 +- breathe/directives/function.py | 23 +- breathe/directives/item.py | 39 +- breathe/filetypes.py | 5 +- breathe/finder/__init__.py | 4 +- breathe/finder/compound.py | 28 +- breathe/finder/factory.py | 8 +- breathe/finder/index.py | 22 +- breathe/project.py | 19 +- breathe/renderer/__init__.py | 4 +- breathe/renderer/filter.py | 1251 ++++++++------------------- breathe/renderer/sphinxrenderer.py | 75 +- breathe/renderer/target.py | 16 +- setup.py | 4 +- tests/test_renderer.py | 3 +- 19 files changed, 576 insertions(+), 1018 deletions(-) diff --git a/.gitignore b/.gitignore index d1598d62..e453ca57 100644 --- a/.gitignore +++ b/.gitignore @@ -48,3 +48,7 @@ com_crashlytics_export_strings.xml # modified by build process examples/doxygen/example.tag examples/specific/dot_graphs/xml/dotfile.dot + +# generated in editable install +/breathe/_parser.pyi +/breathe/_parser.abi3.* diff --git a/README.rst b/README.rst index bfc11d78..560dc9b0 100644 --- a/README.rst +++ b/README.rst @@ -98,7 +98,7 @@ branch. Requirements ------------ -Breathe requires Python 3.6+, Sphinx 4.0+ and Doxygen 1.8+. 
+Breathe requires Python 3.8+, Sphinx 4.0+ and Doxygen 1.8+. Mailing List Archives --------------------- diff --git a/breathe/directives/__init__.py b/breathe/directives/__init__.py index 9e5c62e3..4609c906 100644 --- a/breathe/directives/__init__.py +++ b/breathe/directives/__init__.py @@ -6,7 +6,7 @@ from breathe.renderer.filter import FilterFactory from breathe.renderer.sphinxrenderer import SphinxRenderer -from sphinx.directives import SphinxDirective +from sphinx.directives import SphinxDirective # pyright: ignore from docutils import nodes @@ -16,7 +16,7 @@ from breathe.parser import DoxygenParserFactory from breathe.project import ProjectInfoFactory, ProjectInfo from breathe.renderer import TaggedNode - from breathe.renderer.filter import Filter + from breathe.renderer.filter import DoxFilter from breathe.renderer.mask import MaskFactoryBase from breathe.renderer.target import TargetHandler @@ -77,10 +77,6 @@ def finder_factory(self) -> FinderFactory: def filter_factory(self) -> FilterFactory: return FilterFactory(self.env.app) - @property - def kind(self) -> str: - raise NotImplementedError - def create_warning(self, project_info: ProjectInfo | None, **kwargs) -> _WarningHandler: if project_info: tail = 'in doxygen xml output for project "{project}" from directory: {path}'.format( @@ -96,7 +92,7 @@ def render( self, node_stack: list[TaggedNode], project_info: ProjectInfo, - filter_: Filter, + filter_: DoxFilter, target_handler: TargetHandler, mask_factory: MaskFactoryBase, directive_args, diff --git a/breathe/directives/class_like.py b/breathe/directives/class_like.py index cc7fc780..1ccb8756 100644 --- a/breathe/directives/class_like.py +++ b/breathe/directives/class_like.py @@ -8,14 +8,33 @@ from docutils.parsers.rst.directives import unchanged_required, unchanged, flag -from typing import TYPE_CHECKING +from typing import cast, ClassVar, TYPE_CHECKING if TYPE_CHECKING: + from typing_extensions import NotRequired, TypedDict from breathe import renderer from docutils.nodes import Node + DoxClassOptions = TypedDict('DoxClassOptions',{ + 'path': str, + 'project': str, + 'members': NotRequired[str], + 'membergroups': str, + 'members-only': NotRequired[None], + 'protected-members': NotRequired[None], + 'private-members': NotRequired[None], + 'undoc-members': NotRequired[None], + 'show': str, + 'outline': NotRequired[None], + 'no-link': NotRequired[None], + 'allow-dot-graphs': NotRequired[None]}) +else: + DoxClassOptions = None + class _DoxygenClassLikeDirective(BaseDirective): + kind: ClassVar[str] + required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True @@ -37,9 +56,10 @@ class _DoxygenClassLikeDirective(BaseDirective): def run(self) -> list[Node]: name = self.arguments[0] + options = cast(DoxClassOptions,self.options) try: - project_info = self.project_info_factory.create_project_info(self.options) + project_info = self.project_info_factory.create_project_info(options) except ProjectError as e: warning = self.create_warning(None, kind=self.kind) return warning.warn("doxygen{kind}: %s" % e) @@ -59,8 +79,8 @@ def run(self) -> list[Node]: warning = self.create_warning(project_info, name=name, kind=self.kind) return warning.warn('doxygen{kind}: Cannot find class "{name}" {tail}') - target_handler = create_target_handler(self.options, project_info, self.state.document) - filter_ = self.filter_factory.create_class_filter(name, self.options) + target_handler = create_target_handler(options, project_info, self.state.document) + filter_ = 
self.filter_factory.create_class_filter(name, options) mask_factory = NullMaskFactory() return self.render( diff --git a/breathe/directives/content_block.py b/breathe/directives/content_block.py index bc3f560f..be6ebd23 100644 --- a/breathe/directives/content_block.py +++ b/breathe/directives/content_block.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from breathe.directives import BaseDirective from breathe.file_state_cache import MTimeError from breathe.project import ProjectError @@ -5,16 +7,38 @@ from breathe.renderer.mask import NullMaskFactory from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.target import create_target_handler +from breathe import parser from docutils.nodes import Node from docutils.parsers.rst.directives import unchanged_required, flag -from typing import Any, List +from typing import Any, cast, ClassVar, Literal, TYPE_CHECKING + +if TYPE_CHECKING: + from typing_extensions import NotRequired, TypedDict + + DoxContentBlockOptions = TypedDict('DoxContentBlockOptions',{ + 'path': str, + 'project': str, + 'content-only': NotRequired[None], + 'members': NotRequired[str], + 'protected-members': NotRequired[None], + 'private-members': NotRequired[None], + 'undoc-members': NotRequired[None], + 'show': str, + 'outline': NotRequired[None], + 'no-link': NotRequired[None], + 'desc-only': NotRequired[None], + 'sort': NotRequired[None]}) +else: + DoxContentBlockOptions = None class _DoxygenContentBlockDirective(BaseDirective): """Base class for namespace and group directives which have very similar behaviours""" + kind: ClassVar[Literal['group', 'page', 'namespace']] + required_arguments = 1 optional_arguments = 1 option_spec = { @@ -32,11 +56,12 @@ class _DoxygenContentBlockDirective(BaseDirective): } has_content = False - def run(self) -> List[Node]: + def run(self) -> list[Node]: name = self.arguments[0] + options = cast(DoxContentBlockOptions,self.options) try: - project_info = self.project_info_factory.create_project_info(self.options) + project_info = self.project_info_factory.create_project_info(options) except ProjectError as e: warning = self.create_warning(None, kind=self.kind) return warning.warn("doxygen{kind}: %s" % e) @@ -50,7 +75,7 @@ def run(self) -> List[Node]: finder_filter = self.filter_factory.create_finder_filter(self.kind, name) # TODO: find a more specific type for the Doxygen nodes - matches: List[Any] = [] + matches: list[Any] = [] finder.filter_(finder_filter, matches) # It shouldn't be possible to have too many matches as namespaces & groups in their nature @@ -59,27 +84,27 @@ def run(self) -> List[Node]: warning = self.create_warning(project_info, name=name, kind=self.kind) return warning.warn('doxygen{kind}: Cannot find {kind} "{name}" {tail}') - if "content-only" in self.options and self.kind != "page": + if "content-only" in options and self.kind != "page": # Unpack the single entry in the matches list (node_stack,) = matches - filter_ = self.filter_factory.create_content_filter(self.kind, self.options) + filter_ = self.filter_factory.create_content_filter(self.kind, options) # Having found the compound node for the namespace or group in the index we want to grab # the contents of it which match the filter contents_finder = self.finder_factory.create_finder_from_root( node_stack[0], project_info ) # TODO: find a more specific type for the Doxygen nodes - contents: List[Any] = [] + contents: list[Any] = [] contents_finder.filter_(filter_, contents) # Replaces matches with our new starting points matches = 
contents - target_handler = create_target_handler(self.options, project_info, self.state.document) - filter_ = self.filter_factory.create_render_filter(self.kind, self.options) + target_handler = create_target_handler(options, project_info, self.state.document) + filter_ = self.filter_factory.create_render_filter(self.kind, options) - node_list: List[Node] = [] + node_list: list[Node] = [] for node_stack in matches: object_renderer = SphinxRenderer( self.parser_factory.app, @@ -94,7 +119,9 @@ def run(self) -> List[Node]: mask_factory = NullMaskFactory() context = RenderContext(node_stack, mask_factory, self.directive_args) - node_list.extend(object_renderer.render(context.node_stack[0], context)) + value = context.node_stack[0].value + assert isinstance(value,parser.Node) + node_list.extend(object_renderer.render(value, context)) return node_list diff --git a/breathe/directives/function.py b/breathe/directives/function.py index b4f7b76c..04371649 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -18,12 +18,21 @@ import re -from typing import Any, List, Optional, TYPE_CHECKING +from typing import cast, List, Optional, TYPE_CHECKING if TYPE_CHECKING: + from typing_extensions import NotRequired, TypedDict from breathe import project from docutils.nodes import Node + DoxFunctionOptions = TypedDict('DoxFunctionOptions',{ + 'path': str, + 'project': str, + 'outline': NotRequired[None], + 'no-link': NotRequired[None]}) +else: + DoxFunctionOptions = None + class _NoMatchingFunctionError(BreatheError): pass @@ -68,8 +77,10 @@ def run(self) -> List[Node]: function_name = match.group(2).strip() argsStr = match.group(3) + options = cast(DoxFunctionOptions,self.options) + try: - project_info = self.project_info_factory.create_project_info(self.options) + project_info = self.project_info_factory.create_project_info(options) except ProjectError as e: warning = self.create_warning(None) return warning.warn("doxygenfunction: %s" % e) @@ -83,7 +94,7 @@ def run(self) -> List[Node]: # Extract arguments from the function name. try: args = self._parse_args(argsStr) - except cpp.DefinitionError as e: + except cpp.DefinitionError as e: # pyright: ignore return self.create_warning( project_info, namespace="%s::" % namespace if namespace else "", @@ -142,7 +153,7 @@ def run(self) -> List[Node]: warning_nodes = [nodes.paragraph("", "", nodes.Text(formatted_message)), block] result = warning.warn(message, rendered_nodes=warning_nodes, unformatted_suffix=text) return result - except cpp.DefinitionError as error: + except cpp.DefinitionError as error: # pyright: ignore warning.context["cpperror"] = str(error) return warning.warn( "doxygenfunction: Unable to resolve function " @@ -150,8 +161,8 @@ def run(self) -> List[Node]: "Candidate function could not be parsed. 
Parsing error is\n{cpperror}" ) - target_handler = create_target_handler(self.options, project_info, self.state.document) - filter_ = self.filter_factory.create_outline_filter(self.options) + target_handler = create_target_handler(options, project_info, self.state.document) + filter_ = self.filter_factory.create_outline_filter(options) return self.render( node_stack, diff --git a/breathe/directives/item.py b/breathe/directives/item.py index 8f8dc3ef..c6cb0fc5 100644 --- a/breathe/directives/item.py +++ b/breathe/directives/item.py @@ -1,7 +1,8 @@ +from __future__ import annotations + from breathe.directives import BaseDirective from breathe.file_state_cache import MTimeError from breathe.project import ProjectError -from breathe.renderer.filter import Filter from breathe.renderer.mask import NullMaskFactory from breathe.renderer.target import create_target_handler @@ -9,10 +10,24 @@ from docutils.parsers.rst.directives import unchanged_required, flag -from typing import Any, List +from typing import Any, cast, ClassVar, TYPE_CHECKING + +if TYPE_CHECKING: + from breathe.renderer.filter import DoxFilter + from typing_extensions import NotRequired, TypedDict + + DoxBaseItemOptions = TypedDict('DoxBaseItemOptions',{ + 'path': str, + 'project': str, + 'outline': NotRequired[None], + 'no-link': NotRequired[None]}) +else: + DoxBaseItemOptions = None class _DoxygenBaseItemDirective(BaseDirective): + kind: ClassVar[str] + required_arguments = 1 optional_arguments = 1 option_spec = { @@ -23,19 +38,21 @@ class _DoxygenBaseItemDirective(BaseDirective): } has_content = False - def create_finder_filter(self, namespace: str, name: str) -> Filter: + def create_finder_filter(self, namespace: str, name: str) -> DoxFilter: """Creates a filter to find the node corresponding to this item.""" return self.filter_factory.create_member_finder_filter(namespace, name, self.kind) - def run(self) -> List[Node]: + def run(self) -> list[Node]: + options = cast(DoxBaseItemOptions,self.options) + try: namespace, name = self.arguments[0].rsplit("::", 1) except ValueError: namespace, name = "", self.arguments[0] try: - project_info = self.project_info_factory.create_project_info(self.options) + project_info = self.project_info_factory.create_project_info(options) except ProjectError as e: warning = self.create_warning(None, kind=self.kind) return warning.warn("doxygen{kind}: %s" % e) @@ -49,7 +66,7 @@ def run(self) -> List[Node]: finder_filter = self.create_finder_filter(namespace, name) # TODO: find a more specific type for the Doxygen nodes - matches: List[Any] = [] + matches: list[Any] = [] finder.filter_(finder_filter, matches) if len(matches) == 0: @@ -57,8 +74,8 @@ def run(self) -> List[Node]: warning = self.create_warning(project_info, kind=self.kind, display_name=display_name) return warning.warn('doxygen{kind}: Cannot find {kind} "{display_name}" {tail}') - target_handler = create_target_handler(self.options, project_info, self.state.document) - filter_ = self.filter_factory.create_outline_filter(self.options) + target_handler = create_target_handler(options, project_info, self.state.document) + filter_ = self.filter_factory.create_outline_filter(options) node_stack = matches[0] mask_factory = NullMaskFactory() @@ -78,7 +95,7 @@ class DoxygenDefineDirective(_DoxygenBaseItemDirective): class DoxygenConceptDirective(_DoxygenBaseItemDirective): kind = "concept" - def create_finder_filter(self, namespace: str, name: str) -> Filter: + def create_finder_filter(self, namespace: str, name: str) -> DoxFilter: # Unions are 
stored in the xml file with their fully namespaced name # We're using C++ namespaces here, it might be best to make this file # type dependent @@ -94,7 +111,7 @@ class DoxygenEnumDirective(_DoxygenBaseItemDirective): class DoxygenEnumValueDirective(_DoxygenBaseItemDirective): kind = "enumvalue" - def create_finder_filter(self, namespace: str, name: str) -> Filter: + def create_finder_filter(self, namespace: str, name: str) -> DoxFilter: return self.filter_factory.create_enumvalue_finder_filter(name) @@ -105,7 +122,7 @@ class DoxygenTypedefDirective(_DoxygenBaseItemDirective): class DoxygenUnionDirective(_DoxygenBaseItemDirective): kind = "union" - def create_finder_filter(self, namespace: str, name: str) -> Filter: + def create_finder_filter(self, namespace: str, name: str) -> DoxFilter: # Unions are stored in the xml file with their fully namespaced name # We're using C++ namespaces here, it might be best to make this file # type dependent diff --git a/breathe/filetypes.py b/breathe/filetypes.py index 4372d363..123fe221 100644 --- a/breathe/filetypes.py +++ b/breathe/filetypes.py @@ -2,18 +2,17 @@ A module to house the methods for resolving a code-blocks language based on filename (and extension). """ -from typing import Optional import os.path from pygments.lexers import get_lexer_for_filename from pygments.util import ClassNotFound -def get_pygments_alias(filename: str) -> Optional[str]: +def get_pygments_alias(filename: str) -> str | None: "Find first pygments alias from filename" try: lexer_cls = get_lexer_for_filename(filename) - return lexer_cls.aliases[0] # type: ignore + return lexer_cls.aliases[0] except ClassNotFound: return None diff --git a/breathe/finder/__init__.py b/breathe/finder/__init__.py index 37261553..a7dc1f0b 100644 --- a/breathe/finder/__init__.py +++ b/breathe/finder/__init__.py @@ -5,7 +5,7 @@ if TYPE_CHECKING: from breathe.project import ProjectInfo from breathe.finder.factory import DoxygenItemFinderFactory - from breathe.renderer.filter import Filter + from breathe.renderer.filter import DoxFilter from breathe.renderer import TaggedNode T = TypeVar('T', covariant=True) @@ -17,5 +17,5 @@ def __init__(self, project_info: ProjectInfo, data_object: T, item_finder_factor self.item_finder_factory: DoxygenItemFinderFactory = item_finder_factory self.project_info = project_info - def filter_(self, ancestors: list[TaggedNode], filter_: Filter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors: list[TaggedNode], filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: raise NotImplementedError diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index cf3addce..05ef8eb9 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -1,11 +1,17 @@ +from __future__ import annotations + from breathe.finder import ItemFinder -from breathe.renderer.filter import Filter +from breathe.renderer.filter import NodeStack from breathe import parser from breathe.renderer import TaggedNode +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from breathe.renderer.filter import DoxFilter class DoxygenTypeSubItemFinder(ItemFinder[parser.Node_DoxygenType]): - def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: """Find nodes which match the filter. 
Doesn't test this node, only its children""" node_stack = [TaggedNode(None,self.data_object)] + ancestors @@ -15,11 +21,11 @@ def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) - class CompoundDefTypeSubItemFinder(ItemFinder[parser.Node_compounddefType]): - def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: """Finds nodes which match the filter and continues checks to children""" node_stack = [TaggedNode(None,self.data_object)] + ancestors - if filter_.allow(node_stack): + if filter_(NodeStack(node_stack)): matches.append(node_stack) for sectiondef in self.data_object.sectiondef: @@ -32,11 +38,11 @@ def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) - class SectionDefTypeSubItemFinder(ItemFinder[parser.Node_sectiondefType]): - def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" node_stack = [TaggedNode(None,self.data_object)] + ancestors - if filter_.allow(node_stack): + if filter_(NodeStack(node_stack)): matches.append(node_stack) for memberdef in self.data_object.memberdef: @@ -45,22 +51,22 @@ def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) - class MemberDefTypeSubItemFinder(ItemFinder[parser.Node_memberdefType]): - def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: data_object = self.data_object node_stack = [TaggedNode(None,self.data_object)] + ancestors - if filter_.allow(node_stack): + if filter_(NodeStack(node_stack)): matches.append(node_stack) if data_object.kind == parser.DoxMemberKind.enum: for value in data_object.enumvalue: value_stack = [TaggedNode('enumvalue',value)] + node_stack - if filter_.allow(value_stack): + if filter_(NodeStack(value_stack)): matches.append(value_stack) class RefTypeSubItemFinder(ItemFinder[parser.Node_refType]): - def filter_(self, ancestors, filter_: Filter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: node_stack = [TaggedNode(None,self.data_object)] + ancestors - if filter_.allow(node_stack): + if filter_(NodeStack(node_stack)): matches.append(node_stack) diff --git a/breathe/finder/factory.py b/breathe/finder/factory.py index d4ff7be1..bf187e47 100644 --- a/breathe/finder/factory.py +++ b/breathe/finder/factory.py @@ -5,14 +5,14 @@ from breathe.finder import compound as compoundfinder from breathe import parser from breathe.project import ProjectInfo -from breathe.renderer import FakeParentNode, TaggedNode -from breathe.renderer.filter import Filter +from breathe.renderer import TaggedNode from sphinx.application import Sphinx from typing import Any, Callable, TYPE_CHECKING if TYPE_CHECKING: + from breathe.renderer.filter import DoxFilter ItemFinderCreator = Callable[[ProjectInfo,Any,'DoxygenItemFinderFactory'],ItemFinder] @@ -40,11 +40,11 @@ def __init__(self, root, item_finder_factory: DoxygenItemFinderFactory) -> None: self._root = root self.item_finder_factory = item_finder_factory - def filter_(self, filter_: Filter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, filter_: DoxFilter, matches: 
list[list[TaggedNode]]) -> None: """Adds all nodes which match the filter into the matches list""" item_finder = self.item_finder_factory.create_finder(self._root) - item_finder.filter_([TaggedNode(None,FakeParentNode())], filter_, matches) + item_finder.filter_([], filter_, matches) def root(self): return self._root diff --git a/breathe/finder/index.py b/breathe/finder/index.py index 7a1ab4ed..12b65bb8 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -1,7 +1,7 @@ from __future__ import annotations from breathe.finder import ItemFinder -from breathe.renderer.filter import FilterFactory +from breathe.renderer.filter import FilterFactory, NodeStack from breathe import parser from breathe.renderer import TaggedNode @@ -10,11 +10,11 @@ from typing import Any, TYPE_CHECKING if TYPE_CHECKING: - from breathe.renderer.filter import Filter + from breathe.renderer.filter import DoxFilter class DoxygenTypeSubItemFinder(ItemFinder[parser.Node_DoxygenTypeIndex]): - def filter_(self, ancestors, filter_: Filter, matches) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" compounds = self.data_object.compound @@ -31,7 +31,7 @@ def __init__(self, app: Sphinx, compound_parser: parser.DoxygenCompoundParser, * self.filter_factory = FilterFactory(app) self.compound_parser = compound_parser - def filter_(self, ancestors: list[TaggedNode], filter_: Filter, matches) -> None: + def filter_(self, ancestors: list[TaggedNode], filter_: DoxFilter, matches) -> None: """Finds nodes which match the filter and continues checks to children Requires parsing the xml files referenced by the children for which we use the compound @@ -42,7 +42,7 @@ def filter_(self, ancestors: list[TaggedNode], filter_: Filter, matches) -> None node_stack = [TaggedNode(None,self.data_object)] + ancestors # Match against compound object - if filter_.allow(node_stack): + if filter_(NodeStack(node_stack)): matches.append(node_stack) # Descend to member children @@ -60,9 +60,11 @@ def filter_(self, ancestors: list[TaggedNode], filter_: Filter, matches) -> None finder = self.item_finder_factory.create_finder(file_data) for member_stack in member_matches: - ref_filter = self.filter_factory.create_id_filter( - parser.Node_memberdefType, member_stack[0].refid - ) + refid = member_stack[0].refid + def ref_filter(nstack): + node = nstack.node + return isinstance(node,parser.Node_memberdefType) and node.id == refid + finder.filter_(node_stack, ref_filter, matches) else: # Read in the xml file referenced by the compound and descend into that as well @@ -72,9 +74,9 @@ def filter_(self, ancestors: list[TaggedNode], filter_: Filter, matches) -> None class MemberTypeSubItemFinder(ItemFinder[parser.Node_memberdefType]): - def filter_(self, ancestors, filter_: Filter, matches) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches) -> None: node_stack = [TaggedNode(None,self.data_object)] + ancestors # Match against member object - if filter_.allow(node_stack): + if filter_(NodeStack(node_stack)): matches.append(node_stack) diff --git a/breathe/project.py b/breathe/project.py index dd5b6453..89127427 100644 --- a/breathe/project.py +++ b/breathe/project.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from .exception import BreatheError from sphinx.application import Sphinx @@ -6,7 +8,14 @@ import fnmatch -from typing import Dict +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing_extensions 
import TypedDict + + ProjectOptions = TypedDict('ProjectOptions',{ + 'path': str, + 'project': str}) class ProjectError(BreatheError): @@ -115,9 +124,9 @@ def __init__(self, app: Sphinx): # This can be overridden with the breathe_build_directory config variable self._default_build_dir = str(app.doctreedir.parent) self.project_count = 0 - self.project_info_store: Dict[str, ProjectInfo] = {} - self.project_info_for_auto_store: Dict[str, AutoProjectInfo] = {} - self.auto_project_info_store: Dict[str, AutoProjectInfo] = {} + self.project_info_store: dict[str, ProjectInfo] = {} + self.project_info_for_auto_store: dict[str, AutoProjectInfo] = {} + self.auto_project_info_store: dict[str, AutoProjectInfo] = {} @property def build_dir(self) -> str: @@ -146,7 +155,7 @@ def default_path(self) -> str: % config.breathe_default_project ) - def create_project_info(self, options) -> ProjectInfo: + def create_project_info(self, options: ProjectOptions) -> ProjectInfo: config = self.app.config name = config.breathe_default_project diff --git a/breathe/renderer/__init__.py b/breathe/renderer/__init__.py index 1b472615..fd24796d 100644 --- a/breathe/renderer/__init__.py +++ b/breathe/renderer/__init__.py @@ -9,7 +9,7 @@ from breathe.renderer import mask from breathe.directives.index import RootDataObject - DataObject = Union[parser.NodeOrValue, RootDataObject, 'FakeParentNode'] + DataObject = Union[parser.NodeOrValue, RootDataObject] def format_parser_error(name: str, error: str, filename: str, state, lineno: int, do_unicode_warning: bool = False) -> list[nodes.Node]: @@ -43,8 +43,6 @@ def format_parser_error(name: str, error: str, filename: str, state, lineno: int ), ] -class FakeParentNode: - pass class TaggedNode(NamedTuple): tag: str | None diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 00c0ea45..97dec5bf 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -12,44 +12,6 @@ DoxygenToRstRendererFactory. -General Implementation -~~~~~~~~~~~~~~~~~~~~~~ - -Filters are essential just tests to see if a node matches certain parameters that are needed to -decide whether or not to include it in some output. - -As these filters are declared once and then used on multiple nodes, we model them as object -hierarchies that encapsulate the required test and take a node (with its context) and return True or -False. - -If you wanted a test which figures out if a node has the 'kind' attribute set to 'function' you -might create the following object hierarchy: - - kind_is_function = InFilter(AttributeAccessor(Node(), 'kind'), ['function']) - -This reads from the inside out, as get the node, then get the 'kind' attribute from it, and see if -the value of the attribute is in the list ['function']. - -The Node() is called a 'Selector'. Parent() is also a selector. It means given the current context, -work with the parent of the current node rather than the node itself. This allows you to frame tests -in terms of a node's parent as well as the node which helps when we want nodes with particular -parents and not others. - -The AttributeAccessor() is called an 'Accessor'. It wraps up an attempt to access a particular -attribute on the selected node. There are quite a few different specific accessors but they can -mostly be generalised with the AttributeAccessor. This code has evolved over time and initially the -implementation involved specific accessor classes (which are still used in large parts of it.) - -The InFilter() is unsurprisingly called a 'Filter'. 
There are lots of different filters. Filters -either act on the results of Accessors or on the results of other Filters and they always return -True or False. The AndFilter and the OrFilter can be used to combine the outputs of other Filters -with logical 'and' and 'or' operations. - -You can build up some pretty complex expressions with this level of freedom as you -might imagine. The complexity is unfortunate but necessary as the nature of filtering the xml is -quite complex. - - Finder Filters ~~~~~~~~~~~~~~ @@ -60,7 +22,7 @@ # Descend down the hierarchy # ... - if filter_.allow(node_stack): + if filter_(node_stack): matches.append(self.data_object) # Keep on descending @@ -71,28 +33,6 @@ are interested in and they don't have to worry about allowing the iteration down the hierarchy to continue for nodes which don't match. -An example of a finder filter is: - - AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_CompoundType]), - InFilter(KindAccessor(Node()), ["group"]), - InFilter(NameAccessor(Node()), ["mygroup"]) - ) - -This says, return True for all the nodes of type 'parser.Node_CompoundType' with 'kind' set to -'group' which have the name 'mygroup'. It returns false for everything else, but when a node -matching this is found then it is added to the matches list by the code above. - -It is therefore relatively easy to write finder filters. If you have two separate node filters like -the one above and you want to match on both of them then you can do: - - OrFilter( - node_filter_1, - node_filter_2 - ) - -To combine them. - Content Filters ~~~~~~~~~~~~~~~ @@ -101,103 +41,6 @@ down the hierarchy if they return false. This means that if you're interested in Node_memberdefType nodes with a particular attribute then you have to check for that but also include a clause which allows all other non-Node_memberdefType nodes to pass through as you don't want to interrupt them. - -This means you end up with filters like this: - - OrFilter( - AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_memberdefType]), - InFilter(KindAccessor(Node()), ["group"]), - InFilter(NameAccessor(Node()), ["mygroup"]) - ), - NotFilter( - AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_memberdefType]), - InFilter(KindAccessor(Node()), ["group"]), - ) - ) - ) - -Which is to say that we want to let through a Node_memberdefType, with kind group, with name -'mygroup' but we're also happy if the node is **not** a Node_memberdefType with kind group. Really -we just don't want to let through any Node_memberdefTypes with kind group with name other than -'mygroup'. As such, we can rephrase this as: - - NotFilter( - AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_memberdefType]), - InFilter(KindAccessor(Node()), ["group"]), - NotFilter(InFilter(NameAccessor(Node()), ["mygroup"])) - ) - ) - -Using logical manipulation we can rewrite this as: - - OrFilter( - NotFilter(InFilter(NodeTypeAccessor(Node()), [parser.Node_memberdefType])), - NotFilter(InFilter(KindAccessor(Node()), ["group"])), - InFilter(NameAccessor(Node()), ["mygroup"]) - ) - -We reads: allow if it isn't a Node_memberdefType, or if it is a Node_memberdefType but doesn't have -a 'kind' of 'group', but if it is a Node_memberdefType and has a 'kind' of 'group then only allow it -if it is named 'mygroup'. - - -Helper Syntax -~~~~~~~~~~~~~ - -Some of these filter declarations get a little awkward to read and write. They are not laid out in -manner which reads smoothly. 
Additional helper methods and operator overloads have been introduced -to help with this. - -AttributeAccessor objects are created in property methods on the Selector classes so: - - node.kind - -Where node has been declared as a Node() instance. Results in: - - AttributeAccessor(Node(), 'kind') - -The '==' and '!=' operators on the Accessors have been overloaded to return the appropriate filters -so that: - - node.kind == 'group' - -Results in: - - InFilter(AttributeAccessor(Node(), 'kind'), ['kind']) - -We also override the binary 'and' (&), 'or' (|) and 'not' (~) operators in Python to apply -AndFilters, OrFilters and NotFilters respectively. We have to override the binary operators as they -actual 'and', 'or' and 'not' operators cannot be overridden. So: - - (type.node_type == parser.Node_CompoundType) & (node.name == 'mygroup') - -Translates to: - - AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_CompoundType])), - InFilter(NameAccessor(Node()), ["mygroup"]) - ) - -Where the former is hopefully more readable without sacrificing too much to the abstract magic of -operator overloads. - - -Operator Precedences & Extra Parenthesis -'''''''''''''''''''''''''''''''''''''''' - -As the binary operators have a lower operator precedence than '==' and '!=' and some other operators -we have to include additional parenthesis in the expressions to group them as we want. So instead of -writing: - - node.node_type == parser.Node_CompoundType & node.name == 'mygroup' - -We have to write: - - (node.node_type == parser.Node_CompoundType) & (node.name == 'mygroup') - """ from __future__ import annotations @@ -207,390 +50,75 @@ from sphinx.application import Sphinx import os -from typing import Any, Callable, TYPE_CHECKING -from collections.abc import Iterable +from typing import Any, Callable, Literal, SupportsIndex, TYPE_CHECKING +from collections.abc import Container, Iterable, Mapping if TYPE_CHECKING: + from typing_extensions import TypeAlias, TypeVar from breathe import renderer + from breathe.directives.class_like import DoxClassOptions + from breathe.directives.content_block import DoxContentBlockOptions + DoxNamespaceOptions: TypeAlias = DoxClassOptions | DoxContentBlockOptions -class UnrecognisedKindError(Exception): - pass - + T_options = TypeVar("T_options", DoxClassOptions, DoxContentBlockOptions) -############################################################################### -# Selectors -############################################################################### + DoxFilter: TypeAlias = Callable[["NodeStack"],bool] +else: + DoxClassOptions = None + DoxNamespaceOptions = None -class Selector: - def __call__(self, node_stack: list[renderer.TaggedNode]) -> renderer.TaggedNode: - raise NotImplementedError - - @property - def node_type(self): - return NodeTypeAccessor(self) +CLASS_LIKE_COMPOUNDDEF = (parser.DoxCompoundKind.class_, parser.DoxCompoundKind.struct, parser.DoxCompoundKind.interface) - @property - def kind(self): - return KindAccessor(self) - - @property - def name(self): - return AttributeAccessor(self, "name") - @property - def briefdescription(self): - return AttributeAccessor(self, "briefdescription") +class NodeStack: + def __init__(self, stack: list[renderer.TaggedNode]): + self.stack = stack - @property - def detaileddescription(self): - return AttributeAccessor(self, "detaileddescription") + def ancestor(self, generations: SupportsIndex) -> renderer.DataObject | None: + i = generations.__index__() + return self.stack[i].value if len(self.stack) > i else None 
@property - def prot(self): - return AttributeAccessor(self, "prot") + def parent(self) -> renderer.DataObject | None: + return self.stack[1].value if len(self.stack) > 1 else None @property - def valueOf(self): - return ValueOfAccessor(self) + def node(self) -> renderer.DataObject: + return self.stack[0].value @property - def id(self): - return AttributeAccessor(self, "id") - - -class Ancestor(Selector): - def __init__(self, generations): - self.generations = generations - - def __call__(self, node_stack: list[renderer.TaggedNode]) -> renderer.TaggedNode: - return node_stack[self.generations] - - -class Parent(Selector): - def __call__(self, node_stack: list[renderer.TaggedNode]) -> renderer.TaggedNode: - return node_stack[1] - - def __repr__(self) -> str: - return 'Parent()' - - -class Node(Selector): - def __call__(self, node_stack: list[renderer.TaggedNode]) -> renderer.TaggedNode: - return node_stack[0] - - def __repr__(self) -> str: - return 'Node()' - - -############################################################################### -# Accessors -############################################################################### - - -class Accessor: - def __init__(self, selector: Selector) -> None: - self.selector = selector - - def __call__(self, node_stack): - raise NotImplementedError - - def __eq__(self, value: Any) -> InFilter: - return InFilter(self, [value]) - - def __ne__(self, value: Any) -> NotFilter: - return NotFilter(InFilter(self, [value])) - - def is_one_of(self, collection: Iterable[Any]) -> InFilter: - return InFilter(self, collection) - - def has_content(self) -> HasContentFilter: - return HasContentFilter(self) - - def endswith(self, options: list[str]) -> EndsWithFilter: - return EndsWithFilter(self, options) - - -class NameAccessor(Accessor): - def __call__(self, node_stack) -> str: - return self.selector(node_stack).value.name # type: ignore - - -class NodeTypeAccessor(Accessor): - def __call__(self, node_stack) -> type: - return type(self.selector(node_stack).value) - - def __repr__(self) -> str: - return f'NodeTypeAccessor({self.selector!r})' - - -class KindAccessor(Accessor): - def __call__(self, node_stack): - return self.selector(node_stack).value.kind.value # type: ignore - - -class AttributeAccessor(Accessor): - """Returns the value of a particular attribute on the selected node. - - AttributeAccessor(Node(), 'name') returns the value of ``node.name``. 
- """ - - def __init__(self, selector: Selector, attribute_name: str) -> None: - super().__init__(selector) - self.attribute_name = attribute_name - - def __call__(self, node_stack) -> Any: - return getattr(self.selector(node_stack).value, self.attribute_name) - - def __repr__(self) -> str: - return f'AttributeAccessor({self.selector!r}, {self.attribute_name!r})' - - -class LambdaAccessor(Accessor): - def __init__(self, selector: Selector, func: Callable[[Any], Any]): - super().__init__(selector) - self.func = func - - def __call__(self, node_stack): - return self.func(self.selector(node_stack).value) - - -class NamespaceAccessor(Accessor): - def __call__(self, node_stack): - r = set(''.join(ns) for ns in self.selector(node_stack).value.innernamespace) # type: ignore - r.update(''.join(ns) for ns in self.selector(node_stack).value.innerclass) # type: ignore - return r - - -class ValueOfAccessor(Accessor): - def __call__(self, node_stack) -> str: - return ''.join(self.selector(node_stack).value) # type: ignore - - -############################################################################### -# Filters -############################################################################### - - -class Filter: - def allow(self, node_stack) -> bool: - raise NotImplementedError - - def __and__(self, other: "Filter") -> AndFilter: - return AndFilter(self, other) - - def __or__(self, other: "Filter") -> OrFilter: - return OrFilter(self, other) - - def __invert__(self) -> "NotFilter": - return NotFilter(self) - - -class HasAncestorFilter(Filter): - def __init__(self, generations: int) -> None: - self.generations = generations - - def allow(self, node_stack) -> bool: - return len(node_stack) > self.generations - - -class HasContentFilter(Filter): - def __init__(self, accessor: Accessor): - self.accessor = accessor - - def allow(self, node_stack) -> bool: - """Detects if the node in questions has an empty .content_ property.""" - - return bool(self.accessor(node_stack).content_) - - -class EndsWithFilter(Filter): - """Detects if the string result of the accessor ends with any of the strings in the ``options`` - iterable parameter. 
- """ - - def __init__(self, accessor: Accessor, options: list[str]): - self.accessor = accessor - self.options = options - - def allow(self, node_stack) -> bool: - string = self.accessor(node_stack) - for entry in self.options: - if string.endswith(entry): - return True - return False - + def tag(self) -> str | None: + return self.stack[0].tag -class InFilter(Filter): - """Checks if what is returned from the accessor is 'in' in the members""" - def __init__(self, accessor: Accessor, members: Iterable[Any]) -> None: - self.accessor = accessor - self.members = members +def path_matches(location: str, target_file: str) -> bool: + if path_handler.includes_directory(target_file): + # If the target_file contains directory separators then + # match against the same length at the end of the location + # + location_match = location[-len(target_file) :] + return location_match == target_file - def allow(self, node_stack) -> bool: - name = self.accessor(node_stack) - return name in self.members + # If there are no separators, match against the whole filename + # at the end of the location + # + # This is to prevent "Util.cpp" matching "PathUtil.cpp" + # + location_basename = os.path.basename(location) + return location_basename == target_file - def __repr__(self) -> str: - mem_str = ', '.join(repr(m) for m in self.members) - return f'InFilter({self.accessor!r}, {{{mem_str}}})' +def location_matches(location: parser.Node_locationType | None, target_file: str) -> bool: + return location is not None and path_matches(location.file, target_file) -class GlobFilter(Filter): - def __init__(self, accessor: Accessor, glob): - self.accessor = accessor - self.glob = glob - def allow(self, node_stack) -> bool: - text = self.accessor(node_stack) - return self.glob.match(text) - - -class FilePathFilter(Filter): - def __init__(self, accessor: Accessor, target_file: str): - self.accessor = accessor - self.target_file = target_file - - def allow(self, node_stack) -> bool: - location = self.accessor(node_stack).file - - if path_handler.includes_directory(self.target_file): - # If the target_file contains directory separators then - # match against the same length at the end of the location - # - location_match = location[-len(self.target_file) :] - return location_match == self.target_file - else: - # If there are no separators, match against the whole filename - # at the end of the location - # - # This is to prevent "Util.cpp" matching "PathUtil.cpp" - # - location_basename = os.path.basename(location) - return location_basename == self.target_file - - -class NamespaceFilter(Filter): - def __init__(self, namespace_accessor: Accessor, name_accessor: Accessor): - self.namespace_accessor = namespace_accessor - self.name_accessor = name_accessor - - def allow(self, node_stack) -> bool: - namespaces = self.namespace_accessor(node_stack) - name = self.name_accessor(node_stack) - - try: - namespace, name = name.rsplit("::", 1) - except ValueError: - namespace, name = "", name - - return namespace in namespaces - - -class OpenFilter(Filter): - def allow(self, node_stack) -> bool: - return True - - -class ClosedFilter(Filter): - def allow(self, node_stack) -> bool: - return False - - -class NotFilter(Filter): - def __init__(self, child_filter: Filter): - self.child_filter = child_filter - - def allow(self, node_stack) -> bool: - return not self.child_filter.allow(node_stack) - - -class AndFilter(Filter): - def __init__(self, *filters: Filter): - self.filters = [] - for f in filters: - if isinstance(f,AndFilter): 
self.filters.extend(f.filters) - else: self.filters.append(f) - - def allow(self, node_stack) -> bool: - # If any filter returns False then return False - for filter_ in self.filters: - if not filter_.allow(node_stack): - return False - return True - - def __repr__(self) -> str: - args = ', '.join(map(repr,self.filters)) - return f'AndFilter({args})' - - -class OrFilter(Filter): - """Provides a short-cutted 'or' operation between two filters""" - - def __init__(self, *filters: Filter): - self.filters = [] - for f in filters: - if isinstance(f,OrFilter): self.filters.extend(f.filters) - else: self.filters.append(f) - - def allow(self, node_stack) -> bool: - # If any filter returns True then return True - for filter_ in self.filters: - if filter_.allow(node_stack): - return True - return False - - def __repr__(self) -> str: - args = ', '.join(map(repr,self.filters)) - return f'OrFilter({args})' - - -class IfFilter(Filter): - def __init__(self, condition, if_true, if_false): - self.condition = condition - self.if_true = if_true - self.if_false = if_false - - def allow(self, node_stack) -> bool: - if self.condition.allow(node_stack): - return self.if_true.allow(node_stack) - else: - return self.if_false.allow(node_stack) - - -class Gather(Filter): - def __init__(self, accessor: Accessor, names: list[str]): - self.accessor = accessor - self.names = names - - def allow(self, node_stack) -> bool: - self.names.extend(self.accessor(node_stack)) - return False - - -class TagFilter(Filter): - def __init__(self, selector: Selector, tags: Iterable[str]): - self.selector = selector - self.tags = frozenset(tags) - - def allow(self, node_stack) -> bool: - return self.selector(node_stack).tag in self.tags - - -class LambdaFilter(Filter): - def __init__(self, selector: Selector, func: Callable[[Any], Any]): - self.selector = selector - self.func = func - - def allow(self, node_stack): - return self.func(self.selector(node_stack).value) - - -############################################################################### -# Other stuff -############################################################################### +def namespace_matches(name: str, node: parser.Node_compounddefType): + to_find = name.rpartition("::")[0] + return any(to_find == "".join(ns) for ns in node.innernamespace) or any( + to_find == "".join(ns) for ns in node.innernamespace + ) class Glob: @@ -618,62 +146,54 @@ class FilterFactory: def __init__(self, app: Sphinx) -> None: self.app = app - def create_render_filter(self, kind: str, options: dict[str, Any]) -> Filter: - """Render filter for group & namespace blocks""" + def set_defaults(self, options: T_options) -> T_options: + r: Any = options.copy() + for m in self.app.config.breathe_default_members: + r.setdefault(m, "") + return r - if kind not in ["group", "page", "namespace"]: - raise UnrecognisedKindError(kind) + def create_render_filter(self, kind: Literal['group', 'page', 'namespace'], options: DoxContentBlockOptions) -> DoxFilter: + """Render filter for group & namespace blocks""" - # Generate new dictionary from defaults - filter_options: dict[str, Any] = {entry: "" for entry in self.app.config.breathe_default_members} + filter_options = self.set_defaults(options) - # Update from the actual options - filter_options.update(options) + if "desc-only" in filter_options: + return self._create_description_filter(True, parser.Node_compounddefType) - # Convert the doxygengroup members flag (which just stores None as the value) to an empty - # string to allow the 
create_class_member_filter to process it properly - if "members" in filter_options: - filter_options["members"] = "" + cm_filter = self.create_class_member_filter(filter_options) + ic_filter = self.create_innerclass_filter(filter_options) + o_filter = self.create_outline_filter(filter_options) - if "desc-only" in filter_options: - return self._create_description_filter(True, "compounddef", options) - - node = Node() - grandparent = Ancestor(2) - has_grandparent = HasAncestorFilter(2) - - non_class_memberdef = ( - has_grandparent - & (grandparent.node_type == "compounddef") - & (grandparent.kind != "class") - & (grandparent.kind != "struct") - & (grandparent.kind != "interface") - & (node.node_type == "memberdef") - ) + def filter(nstack: NodeStack) -> bool: + grandparent = nstack.ancestor(2) + return ((cm_filter(nstack) or ( + isinstance(grandparent,parser.Node_compounddefType) + and grandparent.kind not in CLASS_LIKE_COMPOUNDDEF + and isinstance(nstack.node, parser.Node_memberdefType))) + and ic_filter(nstack) + and o_filter(nstack)) - return ( - (self.create_class_member_filter(filter_options) | non_class_memberdef) - & self.create_innerclass_filter(filter_options) - & self.create_outline_filter(filter_options) - ) + return filter - def create_class_filter(self, target: str, options: dict[str, Any]) -> Filter: + def create_class_filter(self, target: str, options: DoxClassOptions) -> DoxFilter: """Content filter for classes based on various directive options""" - # Generate new dictionary from defaults - filter_options: dict[str, Any] = {entry: "" for entry in self.app.config.breathe_default_members} + filter_options = self.set_defaults(options) - # Update from the actual options - filter_options.update(options) + cm_filter = self.create_class_member_filter(filter_options) + ic_filter = self.create_innerclass_filter(filter_options, outerclass=target) + o_filter = self.create_outline_filter(filter_options) + s_filter = self.create_show_filter(filter_options) - return AndFilter( - self.create_class_member_filter(filter_options), - self.create_innerclass_filter(filter_options, outerclass=target), - self.create_outline_filter(filter_options), - self.create_show_filter(filter_options), - ) + return (lambda nstack: cm_filter(nstack) + and ic_filter(nstack) + and o_filter(nstack) + and s_filter(nstack)) - def create_innerclass_filter(self, options: dict[str, Any], outerclass: str = "") -> Filter: + @classmethod + def create_innerclass_filter( + cls, options: DoxNamespaceOptions, outerclass: str = "" + ) -> DoxFilter: """ :param outerclass: Should be the class/struct being target by the directive calling this code. If it is a group or namespace directive then it should be left @@ -682,167 +202,114 @@ def create_innerclass_filter(self, options: dict[str, Any], outerclass: str = "" The name should include any additional namespaces that the target class is in. 
""" + allowed: set[parser.DoxProtectionKind] = set() + if "protected-members" in options: allowed.add(parser.DoxProtectionKind.protected) + if "private-members" in options: allowed.add(parser.DoxProtectionKind.private) - node = Node() - node_is_innerclass = (node.node_type == parser.Node_refType) & TagFilter(node, ["innerclass"]) - - parent = Parent() - parent_is_compounddef = parent.node_type == "compounddef" - parent_is_class = parent.kind.is_one_of(["class", "struct", "interface"]) - - allowed: set[str] = set() - all_options = { - "protected-members": "protected", - "private-members": "private", - } - - for option, scope in all_options.items(): - if option in options: - allowed.add(scope) - - node_is_innerclass_in_class = parent_is_compounddef & parent_is_class & node_is_innerclass - - public_innerclass_filter = ClosedFilter() + description = cls._create_description_filter(True, parser.Node_compounddefType) + members: set[str] | None = None if "members" in options: - if options["members"].strip(): - text = options["members"] + members_str = options["members"] + if members_str and members_str.strip(): prefix = ("%s::" % outerclass) if outerclass else "" # Matches sphinx-autodoc behaviour of comma separated values - members = set(["%s%s" % (prefix, x.strip()) for x in text.split(",")]) - node_valueOf_is_in_members = node.valueOf.is_one_of(members) - - # Accept any nodes which don't have a "sectiondef" as a parent or, if they do, only - # accept them if their names are in the members list - public_innerclass_filter = ~node_is_innerclass_in_class | node_valueOf_is_in_members - + members = set(["%s%s" % (prefix, x.strip()) for x in members_str.split(",")]) else: - allowed.add("public") + allowed.add(parser.DoxProtectionKind.public) - node_is_in_allowed_scope = node.prot.is_one_of(allowed) + def filter(nstack: NodeStack) -> bool: + node = nstack.node + parent = nstack.parent - innerclass = ~node_is_innerclass_in_class | node_is_in_allowed_scope - description = self._create_description_filter(True, "compounddef", options) + return (not (isinstance(node,parser.Node_refType) + and nstack.tag == "innerclass" + and isinstance(parent,parser.Node_compounddefType) + and parent.kind in CLASS_LIKE_COMPOUNDDEF) + or node.prot in allowed + or (members is not None and ''.join(node) in members) + or description(nstack)) - # Put parent check last as we only want to check parents of innerclass's otherwise we have - # to check the parent's type as well - return innerclass | public_innerclass_filter | description + return filter - def create_show_filter(self, options: dict[str, Any]) -> Filter: + @staticmethod + def create_show_filter(options: Mapping[str, Any]) -> DoxFilter: """Currently only handles the header-file entry""" - try: - text = options["show"] - except KeyError: - # Allow through everything except the header-file includes nodes - return OrFilter( - NotFilter(InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType])), - NotFilter(InFilter(NodeTypeAccessor(Node()), [parser.Node_incType])), - ) - - if text == "header-file": - # Allow through everything, including header-file includes - return OpenFilter() + if options.get("show") == "header-file": + return lambda nstack: True # Allow through everything except the header-file includes nodes - return OrFilter( - NotFilter(InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType])), - NotFilter(InFilter(NodeTypeAccessor(Node()), [parser.Node_incType])), - ) + def filter(nstack: NodeStack) -> bool: + return not ( + 
isinstance(nstack.parent, parser.Node_compounddefType) + and isinstance(nstack.node, parser.Node_incType) + ) + + return filter - def _create_description_filter( - self, allow: bool, level: str, options: dict[str, Any] - ) -> Filter: + @staticmethod + def _create_description_filter(allow: bool, level: type[parser.Node]) -> DoxFilter: """Whether or not we allow descriptions is determined by the calling function and we just do whatever the 'allow' function parameter tells us. """ - node = Node() - node_is_description = node.node_type == "description" - parent = Parent() - parent_is_level = parent.node_type == level - - # Nothing with a parent that's a sectiondef - description_filter = ~parent_is_level - - # Let through any description children of sectiondefs if we output any kind members if allow: - description_filter = (parent_is_level & node_is_description) | ~parent_is_level - - return description_filter - - def _create_public_members_filter(self, options: dict[str, Any]) -> Filter: - node = Node() - node_is_memberdef = node.node_type == "memberdef" - node_is_public = node.prot == "public" - - parent = Parent() - parent_is_sectiondef = parent.node_type == "sectiondef" + # Let through any description children of sectiondefs if we output any kind members + def filter(nstack: NodeStack) -> bool: + return not isinstance(nstack.parent,level) or isinstance(nstack.node,parser.Node_descriptionType) + else: + # Nothing with a parent that's a sectiondef + def filter(nstack: NodeStack) -> bool: + return not isinstance(nstack.parent,level) - # Nothing with a parent that's a sectiondef - is_memberdef = parent_is_sectiondef & node_is_memberdef - public_members_filter = ~is_memberdef + return filter - # If the user has specified the 'members' option with arguments then we only pay attention - # to that and not to any other member settings + @staticmethod + def _create_public_members_filter(options: DoxNamespaceOptions) -> Callable[[parser.Node_memberdefType],bool]: if "members" in options: - if options["members"].strip(): - text = options["members"] - + # If the user has specified the 'members' option with arguments then + # we only pay attention to that and not to any other member settings + members_str = options["members"] + if members_str and members_str.strip(): # Matches sphinx-autodoc behaviour of comma separated values - members = set([x.strip() for x in text.split(",")]) - - node_name_is_in_members = node.name.is_one_of(members) + members = set([x.strip() for x in members_str.split(",")]) - # Accept any nodes which don't have a "sectiondef" as a parent or, if they do, only - # accept them if their names are in the members list - public_members_filter = ( - parent_is_sectiondef & node_name_is_in_members - ) | ~parent_is_sectiondef + # Accept any nodes which don't have a "sectiondef" as a parent + # or, if they do, only accept them if their names are in the + # members list + def filter(node: parser.Node_memberdefType) -> bool: + return node.name in members else: - # Select anything that doesn't have a parent which is a sectiondef, or, if it does, - # only select the public ones - public_members_filter = (is_memberdef & node_is_public) | ~is_memberdef - return public_members_filter - - def _create_non_public_members_filter( - self, prot: str, option_name: str, options: dict[str, Any] - ) -> Filter: - """'prot' is the doxygen xml term for 'public', 'protected' and 'private' categories.""" - - node = Node() - node_is_memberdef = node.node_type == "memberdef" - node_is_public = node.prot == prot - - 
parent = Parent() - parent_is_sectiondef = parent.node_type == "sectiondef" - - # Nothing with a parent that's a sectiondef - is_memberdef = parent_is_sectiondef & node_is_memberdef - filter_ = ~is_memberdef - - if option_name in options: - # Allow anything that isn't a memberdef, or if it is only allow the public ones - filter_ = ~is_memberdef | node_is_public - return filter_ - - def _create_undoc_members_filter(self, options: dict[str, Any]) -> Filter: - node = Node() - node_is_memberdef = node.node_type == "memberdef" - - node_has_description = ( - node.briefdescription.has_content() | node.detaileddescription.has_content() - ) + # Select anything that doesn't have a parent which is a + # sectiondef, or, if it does, only select the public ones + def filter(node: parser.Node_memberdefType) -> bool: + return node.prot == parser.DoxProtectionKind.public + else: + # Nothing with a parent that's a sectiondef + def filter(node: parser.Node_memberdefType) -> bool: + return False - # Allow anything that isn't a memberdef, or if it is only allow the ones with a description - undoc_members_filter = ~node_is_memberdef | node_has_description + return filter + @staticmethod + def _create_undoc_members_filter(options: DoxNamespaceOptions) -> DoxFilter: if "undoc-members" in options: - undoc_members_filter = OpenFilter() - return undoc_members_filter + return lambda nstack: True + + def filter(nstack: NodeStack) -> bool: + node = nstack.node + # Allow anything that isn't a Node_memberdefType, or if it is only + # allow the ones with a description + return (not isinstance(node, parser.Node_memberdefType)) or bool( + node.briefdescription or node.detaileddescription + ) + + return filter - def create_class_member_filter(self, options: dict[str, Any]) -> Filter: + @classmethod + def create_class_member_filter(cls, options: DoxNamespaceOptions) -> DoxFilter: """Content filter based on :members: and :private-members: classes""" # I can't fully explain the filtering of descriptions here. 
More testing needed to figure @@ -853,114 +320,107 @@ def create_class_member_filter(self, options: dict[str, Any]) -> Filter: "members" in options or "protected-members" in options or "private-members" in options ) - description = self._create_description_filter(allow, "sectiondef", options) + description = cls._create_description_filter(allow, parser.Node_sectiondefType) # Create all necessary filters and combine them - public_members = self._create_public_members_filter(options) - - protected_members = self._create_non_public_members_filter( - "protected", "protected-members", options - ) + public_members = cls._create_public_members_filter(options) + + undoc_members = cls._create_undoc_members_filter(options) + + prot_filter = () + if "protected-members" in options: + prot_filter += (parser.DoxProtectionKind.protected,) + if "private-members" in options: + prot_filter += (parser.DoxProtectionKind.private,) + + # Allow anything that isn't a memberdef, or if it is, and 'prot' is not + # empty, allow the ones with an equal 'prot' attribute + def filter(nstack: NodeStack) -> bool: + node = nstack.node + return (((not (isinstance(node,parser.Node_memberdefType) and isinstance(nstack.parent,parser.Node_sectiondefType)) + or (bool(prot_filter) and node.prot in prot_filter) + or public_members(node)) + and undoc_members(nstack)) + or description(nstack)) + + return filter + + @staticmethod + def create_outline_filter(options: Mapping[str, Any]) -> DoxFilter: + if "outline" in options: + return lambda nstack: not isinstance( + nstack.node, (parser.Node_descriptionType, parser.Node_incType) + ) - private_members = self._create_non_public_members_filter( - "private", "private-members", options - ) + return lambda nstack: True - undoc_members = self._create_undoc_members_filter(options) + @classmethod + def create_file_filter(cls, filename: str, options: Mapping[str, Any]) -> DoxFilter: + valid_names: set[str] = set() - # Allow any public/private members which also fit the undoc filter and all the descriptions - allowed_members = (public_members | protected_members | private_members) & undoc_members - return allowed_members | description + outline_filter = cls.create_outline_filter(options) - def create_outline_filter(self, options: dict[str, Any]) -> Filter: - if "outline" in options: - node = Node() - return ~node.node_type.is_one_of([parser.Node_descriptionType, parser.Node_incType]) - else: - return OpenFilter() - - def create_file_filter(self, filename: str, options: dict[str, Any]) -> Filter: - valid_names: set[str] = set() + def filter(nstack: NodeStack) -> bool: + if not outline_filter(nstack): + return False - def gather_namespaces(node: parser.Node_compounddefType): - valid_names.update(''.join(ns) for ns in node.innernamespace) - valid_names.update(''.join(ns) for ns in node.innerclass) - return False - - filter_ = AndFilter( - NotFilter( - # Gather the "namespaces" attribute from the - # compounddef for the file we're rendering and - # store the information in the "valid_names" list - AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_compounddefType]), - InFilter(KindAccessor(Node()), ["file"]), - FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename), - LambdaFilter(Node(), gather_namespaces), - ) - ), - NotFilter( - # Take the valid_names and everytime we handle an - # innerclass or innernamespace, check that its name - # was one of those initial valid names so that we - # never end up rendering a namespace or class that - # wasn't in the initial file. 
Notably this is - # required as the location attribute for the - # namespace in the xml is unreliable. - AndFilter( - InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), - InFilter(NodeTypeAccessor(Node()), [parser.Node_refType]), - TagFilter(Node(), ["innerclass", "innernamespace"]), - LambdaFilter(Node(), (lambda node: ''.join(node) not in valid_names)) - ) - ), - NotFilter( - # Ignore innerclasses and innernamespaces that are inside a - # namespace that is going to be rendered as they will be - # rendered with that namespace and we don't want them twice - AndFilter( - InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), - InFilter(NodeTypeAccessor(Node()), [parser.Node_refType]), - TagFilter(Node(), ["innerclass", "innernamespace"]), - NamespaceFilter( - NamespaceAccessor(Parent()), - LambdaAccessor(Node(), ''.join), - ), - ) - ), - NotFilter( + node = nstack.node + parent = nstack.parent + if isinstance(node, parser.Node_compounddefType): + if node.kind == parser.DoxCompoundKind.file: + # Gather the "namespaces" attribute from the + # compounddef for the file we're rendering and + # store the information in the "valid_names" list + if location_matches(node.location, filename): + valid_names.update("".join(ns) for ns in node.innernamespace) + valid_names.update("".join(ns) for ns in node.innerclass) + + if node.kind != parser.DoxCompoundKind.namespace: + # Ignore compounddefs which are from another file + # (normally means classes and structs which are in a + # namespace that we have other interests in) but only + # check it if the compounddef is not a namespace + # itself, as for some reason compounddefs for + # namespaces are registered with just a single file + # location even if they namespace is spread over + # multiple files + return location_matches(node.location, filename) + + elif isinstance(node, parser.Node_refType): + name = "".join(node) + if isinstance(parent, parser.Node_compounddefType) and nstack.tag in { + "innerclass", + "innernamespace", + }: + # Take the valid_names and every time we handle an + # innerclass or innernamespace, check that its name + # was one of those initial valid names so that we + # never end up rendering a namespace or class that + # wasn't in the initial file. Notably this is + # required as the location attribute for the + # namespace in the xml is unreliable. + if name not in valid_names: + return False + + # Ignore innerclasses and innernamespaces that are inside a + # namespace that is going to be rendered as they will be + # rendered with that namespace and we don't want them twice + if namespace_matches(name, parent): + return False + + elif isinstance(node, parser.Node_memberdefType): # Ignore memberdefs from files which are different to # the one we're rendering. 
This happens when we have to # cross into a namespace xml file which has entries # from multiple files in it - AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_memberdefType]), - NotFilter( - FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename) - ), - ) - ), - NotFilter( - # Ignore compounddefs which are from another file - # (normally means classes and structs which are in a - # namespace that we have other interests in) but only - # check it if the compounddef is not a namespace - # itself, as for some reason compounddefs for - # namespaces are registered with just a single file - # location even if they namespace is spread over - # multiple files - AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_compounddefType]), - NotFilter(InFilter(KindAccessor(Node()), ["namespace"])), - NotFilter( - FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename) - ), - ) - ), - ) - return AndFilter(self.create_outline_filter(options), filter_) + return path_matches(node.location.file, filename) + + return True + + return filter - def create_content_filter(self, kind: str, options: dict[str, Any]) -> Filter: + @staticmethod + def create_content_filter(kind: Literal['group', 'page', 'namespace'], options: Mapping[str, Any]) -> DoxFilter: """Returns a filter which matches the contents of the or namespace but not the group or namepace name or description. @@ -970,145 +430,140 @@ def create_content_filter(self, kind: str, options: dict[str, Any]) -> Filter: As a finder/content filter we only need to match exactly what we're interested in. """ - if kind not in ["group", "page", "namespace"]: - raise UnrecognisedKindError(kind) + def filter(nstack: NodeStack) -> bool: + node = nstack.node + parent = nstack.parent - node = Node() + if isinstance(node,parser.Node_memberdefType): + return node.prot == parser.DoxProtectionKind.public + + return (isinstance(node,parser.Node_refType) + and isinstance(parent,parser.Node_compounddefType) + and parent.kind.value == kind + and nstack.tag == 'innerclass' + and node.prot == parser.DoxProtectionKind.public) - # Filter for public memberdefs - node_is_memberdef = node.node_type == parser.Node_memberdefType - node_is_public = node.prot == "public" + return filter - public_members = node_is_memberdef & node_is_public - - # Filter for public innerclasses - parent = Parent() - parent_is_compounddef = parent.node_type == parser.Node_compounddefType - parent_is_class = parent.kind == kind - - node_is_innerclass = (node.node_type == parser.Node_refType) & TagFilter(node, ["innerclass"]) - node_is_public = node.prot == "public" - - public_innerclass = ( - parent_is_compounddef & parent_is_class & node_is_innerclass & node_is_public - ) + @classmethod + def create_index_filter(cls, options: Mapping[str, Any]) -> DoxFilter: + outline_filter = cls.create_outline_filter(options) - return public_members | public_innerclass + def filter(nstack: NodeStack) -> bool: + if not outline_filter(nstack): + return False - def create_index_filter(self, options: dict[str, Any]) -> Filter: - filter_ = AndFilter( - NotFilter( - AndFilter( - InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), - InFilter(NodeTypeAccessor(Node()), [parser.Node_refType]), - TagFilter(Node(), ["innerclass", "innernamespace"]), - ) - ), - NotFilter( - AndFilter( - InFilter(NodeTypeAccessor(Parent()), [parser.Node_compounddefType]), - InFilter(KindAccessor(Parent()), ["group"]), - InFilter(NodeTypeAccessor(Node()), [parser.Node_sectiondefType]), - 
InFilter(KindAccessor(Node()), ["func"]), + node = nstack.node + parent = nstack.parent + return not ( + isinstance(parent, parser.Node_compounddefType) + and ( + ( + isinstance(node, parser.Node_refType) + and nstack.tag in ("innerclass", "innernamespace") + ) + or ( + parent.kind == parser.DoxCompoundKind.group + and isinstance(node, parser.Node_sectiondefType) + and node.kind == parser.DoxSectionKind.func + ) ) - ), - ) - - return AndFilter(self.create_outline_filter(options), filter_) - - def create_open_filter(self) -> Filter: - """Returns a completely open filter which matches everything""" + ) - return OpenFilter() + return filter - def create_id_filter(self, node_type: type, refid: str) -> Filter: - node = Node() - return (node.node_type == node_type) & (node.id == refid) + @staticmethod + def create_file_finder_filter(filename: str) -> DoxFilter: + def filter(nstack: NodeStack) -> bool: + node = nstack.node + return ( + isinstance(node, parser.Node_compounddefType) + and node.kind == parser.DoxCompoundKind.file + and location_matches(node.location, filename) + ) - def create_file_finder_filter(self, filename: str) -> Filter: - filter_ = AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_compounddefType]), - InFilter(KindAccessor(Node()), ["file"]), - FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename), - ) - return filter_ + return filter - def create_member_finder_filter(self, namespace: str, name: str, kind: str) -> Filter: + def create_member_finder_filter(self, namespace: str, name: str, kinds: Container[parser.MemberKind] | str) -> DoxFilter: """Returns a filter which looks for a member with the specified name and kind.""" - node = Node() - parent = Parent() + if isinstance(kinds,str): + kinds = (parser.MemberKind(kinds),) - node_matches = (node.node_type == parser.Node_MemberType) & (node.kind == kind) & (node.name == name) + def node_matches(nstack: NodeStack) -> bool: + node = nstack.node + return (isinstance(node,parser.Node_MemberType) + and node.kind in kinds + and node.name == name) if namespace: - parent_matches = ( - (parent.node_type == parser.Node_CompoundType) - & InFilter(parent.kind, ["namespace", "class", "struct", "interface"]) - & (parent.name == namespace) - ) - return parent_matches & node_matches + def filter(nstack: NodeStack) -> bool: + parent = nstack.parent + return (node_matches(nstack) + and isinstance(parent,parser.Node_CompoundType) + and parent.kind in {parser.CompoundKind.namespace, + parser.CompoundKind.class_, + parser.CompoundKind.struct, + parser.CompoundKind.interface} + and parent.name == namespace) else: - is_implementation_file = parent.name.endswith( - self.app.config.breathe_implementation_filename_extensions - ) - parent_is_compound = parent.node_type == parser.Node_CompoundType - parent_is_file = (parent.kind == "file") & (~is_implementation_file) - parent_is_not_file = parent.kind != "file" + ext = self.app.config.breathe_implementation_filename_extensions - return (parent_is_compound & parent_is_file & node_matches) | ( - parent_is_compound & parent_is_not_file & node_matches - ) + def filter(nstack: NodeStack) -> bool: + parent = nstack.parent + return (isinstance(parent,parser.Node_CompoundType) + and (parent.kind != parser.CompoundKind.file or parent.name.endswith(ext))) - def create_function_and_all_friend_finder_filter(self, namespace: str, name: str) -> Filter: - parent = Parent() - parent_is_compound = parent.node_type == parser.Node_CompoundType - parent_is_group = parent.kind == "group" + return 
filter - function_filter = self.create_member_finder_filter(namespace, name, "function") - friend_filter = self.create_member_finder_filter(namespace, name, "friend") - # Get matching functions but only ones where the parent is not a group. We want to skip - # function entries in groups as we'll find the same functions in a file's xml output - # elsewhere and having more than one match is confusing for our logic later on. - return (function_filter | friend_filter) & ~(parent_is_compound & parent_is_group) + def create_function_and_all_friend_finder_filter(self, namespace: str, name: str) -> DoxFilter: + fun_finder = self.create_member_finder_filter(namespace, name, (parser.MemberKind.function, parser.MemberKind.friend)) - def create_enumvalue_finder_filter(self, name: str) -> Filter: + # Get matching functions but only ones where the parent is not a group. + # We want to skip function entries in groups as we'll find the same + # functions in a file's xml output elsewhere and having more than one + # match is confusing for our logic later on. + def filter(nstack: NodeStack) -> bool: + if not fun_finder(nstack): return False + + parent = nstack.parent + return not (isinstance(parent, parser.Node_CompoundType) + and parent.kind == parser.CompoundKind.group) + + return filter + + @staticmethod + def create_enumvalue_finder_filter(name: str) -> DoxFilter: """Returns a filter which looks for an enumvalue with the specified name.""" - node = Node() - return (node.node_type == parser.Node_enumvalueType) & (node.name == name) + def filter(nstack: NodeStack): + node = nstack.node + return isinstance(node, parser.Node_enumvalueType) and node.name == name + + return filter - def create_compound_finder_filter(self, name: str, kind: str) -> Filter: + @staticmethod + def create_compound_finder_filter(name: str, kind: str) -> DoxFilter: """Returns a filter which looks for a compound with the specified name and kind.""" - node = Node() - return (node.node_type == parser.Node_CompoundType) & (node.kind == kind) & (node.name == name) + def filter(nstack: NodeStack): + node = nstack.node + return ( + isinstance(node, parser.Node_CompoundType) + and node.kind.value == kind + and node.name == name + ) + + return filter - def create_finder_filter(self, kind: str, name: str) -> Filter: + @classmethod + def create_finder_filter( + cls, kind: Literal["group", "page", "namespace"], name: str + ) -> DoxFilter: """Returns a filter which looks for the compound node from the index which is a group node (kind=group) and has the appropriate name The compound node should reference the group file which we can parse for the group contents. 
""" - - if kind == "group": - filter_ = AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_CompoundType]), - InFilter(KindAccessor(Node()), ["group"]), - InFilter(NameAccessor(Node()), [name]), - ) - elif kind == "page": - filter_ = AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_CompoundType]), - InFilter(KindAccessor(Node()), ["page"]), - InFilter(NameAccessor(Node()), [name]), - ) - else: - # Assume kind == 'namespace' - filter_ = AndFilter( - InFilter(NodeTypeAccessor(Node()), [parser.Node_CompoundType]), - InFilter(KindAccessor(Node()), ["namespace"]), - InFilter(NameAccessor(Node()), [name]), - ) - return filter_ + return cls.create_compound_finder_filter(name, kind) diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index 31a51dac..166b396e 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -4,6 +4,7 @@ import sphinx from breathe import parser, filetypes +from breathe.renderer.filter import NodeStack from sphinx import addnodes from sphinx.domains import cpp, c, python @@ -20,6 +21,7 @@ from typing import Any, Callable, cast, ClassVar, Generic, Optional, Protocol, Type, TypeVar, TYPE_CHECKING, Union from collections.abc import Iterable, Sequence + php: Any try: from sphinxcontrib import phpdomain as php # type: ignore @@ -38,7 +40,7 @@ if TYPE_CHECKING: from breathe.project import ProjectInfo from breathe.renderer import RenderContext, DataObject - from breathe.renderer.filter import Filter + from breathe.renderer.filter import DoxFilter from breathe.renderer.target import TargetHandler from sphinx.application import Sphinx @@ -361,7 +363,7 @@ def create(domain: str, args) -> ObjectDescription: class NodeFinder(nodes.SparseNodeVisitor): """Find the Docutils desc_signature declarator and desc_content nodes.""" - def __init__(self, document): + def __init__(self, document: nodes.document): super().__init__(document) self.declarator: Declarator | None = None self.content: addnodes.desc_content | None = None @@ -526,7 +528,7 @@ class NodeHandler(Generic[T]): """Dummy callable that associates a set of nodes to a function. This gets unwrapped by NodeVisitor and is never actually called.""" - def __init__(self,handler): + def __init__(self,handler: Callable[[SphinxRenderer, T], list[Node]]): self.handler = handler self.nodes: set[type[parser.NodeOrValue]] = set() @@ -537,7 +539,7 @@ class TaggedNodeHandler(Generic[T]): """Dummy callable that associates a set of nodes to a function. 
This gets unwrapped by NodeVisitor and is never actually called.""" - def __init__(self,handler): + def __init__(self,handler: Callable[[SphinxRenderer, str, T], list[Node]]): self.handler = handler self.nodes: set[type[parser.NodeOrValue]] = set() @@ -546,18 +548,16 @@ def __call__(self, r: SphinxRenderer, tag: str, node: T, /) -> list[Node]: def node_handler(node: type[parser.NodeOrValue]): def inner(f: Callable[[SphinxRenderer, T], list[Node]]) -> Callable[[SphinxRenderer, T], list[Node]]: - if not isinstance(f,NodeHandler): - f = NodeHandler(f) - f.nodes.add(node) - return f + handler: NodeHandler = f if isinstance(f,NodeHandler) else NodeHandler(f) + handler.nodes.add(node) + return handler return inner def tagged_node_handler(node: type[parser.NodeOrValue]): def inner(f: Callable[[SphinxRenderer, str, T], list[Node]]) -> Callable[[SphinxRenderer, str, T], list[Node]]: - if not isinstance(f,TaggedNodeHandler): - f = TaggedNodeHandler(f) - f.nodes.add(node) - return f + handler: TaggedNodeHandler = f if isinstance(f,TaggedNodeHandler) else TaggedNodeHandler(f) + handler.nodes.add(node) + return handler return inner class NodeVisitor(type): @@ -602,7 +602,7 @@ def __init__( document: nodes.document, target_handler: TargetHandler, compound_parser: parser.DoxygenCompoundParser, - filter_: Filter, + filter_: DoxFilter, ): self.app = app @@ -741,9 +741,11 @@ def run_directive( # (e.g., variable in function), so perhaps skip (see #671). # If there are nodes, there should be at least 2. if len(nodes) != 0: - assert len(nodes) >= 2, nodes + assert len(nodes) >= 2 rst_node = nodes[1] - finder = NodeFinder(rst_node.document) + doc = rst_node.document + assert doc is not None + finder = NodeFinder(doc) rst_node.walk(finder) assert finder.declarator @@ -849,7 +851,7 @@ def debug_print_node(n): names: list[str] = [] for node in self.qualification_stack[1:]: if config.breathe_debug_trace_qualification: - print("{}{}".format(_debug_indent * " ", debug_print_node(node))) # type: ignore + print("{}{}".format(_debug_indent * " ", debug_print_node(node))) # pyright: ignore if isinstance(node,parser.Node_refType) and len(names) == 0: if config.breathe_debug_trace_qualification: print("{}{}".format(_debug_indent * " ", "res=")) @@ -948,7 +950,9 @@ def run_domain_directive(self, kind, names): # Filter out outer class names if we are rendering a member as a part of a class content. 
rst_node = nodes[1] - finder = NodeFinder(rst_node.document) + doc = rst_node.document + assert doc is not None + finder = NodeFinder(doc) rst_node.walk(finder) assert finder.declarator is not None @@ -1009,7 +1013,7 @@ def pullup(node, typ, dest): para.replace_self(para.children) # and remove empty top-level paragraphs - if isinstance(candNode, nodes.paragraph) and len(candNode) == 0: # type: ignore + if isinstance(candNode, nodes.paragraph) and len(candNode) == 0: # pyright: ignore continue detailed.append(candNode) @@ -1021,7 +1025,7 @@ def pullup(node, typ, dest): fieldLists = [fieldList] # collapse retvals into a single return field - if len(fieldLists) != 0 and sphinx.version_info[0:2] < (4, 3): # type: ignore + if len(fieldLists) != 0 and sphinx.version_info[0:2] < (4, 3): # pyright: ignore others: list[nodes.field] = [] retvals: list[nodes.field] = [] f: nodes.field @@ -1091,7 +1095,9 @@ def render_declaration(self, node: parser.Node_memberdefType, declaration=None, print("{}Doxygen target (old): {}".format(" " * _debug_indent, target[0]["ids"])) rst_node = nodes[1] - finder = NodeFinder(rst_node.document) + doc = rst_node.document + assert doc is not None + finder = NodeFinder(doc) rst_node.walk(finder) assert finder.declarator is not None @@ -1311,11 +1317,11 @@ def def_render_signature( # add base classes if kind in (parser.DoxCompoundKind.class_, parser.DoxCompoundKind.struct): - bs = [] + bs: list[str] = [] for base in file_data.compounddef[0].basecompoundref: - b = [] + b: list[str] = [] if base.prot is not None: - b.append(base.prot) + b.append(base.prot.value) if base.virt == parser.DoxVirtualKind.virtual: b.append("virtual") b.append(base[0]) @@ -1330,7 +1336,9 @@ def def_render_signature( nodes = self.run_domain_directive(kind.value, self.context.directive_args[1]) rst_node = nodes[1] - finder = NodeFinder(rst_node.document) + doc = rst_node.document + assert doc is not None + finder = NodeFinder(doc) rst_node.walk(finder) assert finder.declarator is not None assert finder.content is not None @@ -1766,7 +1774,8 @@ def visit_docsectN(self, node: parser.Node_docSect1Type | parser.Node_docSect2Ty """ section = nodes.section() section["ids"].append(self.get_refid(node.id)) - section += nodes.title(node.title, node.title) + title = node.title or '' + section += nodes.title(title, title) section += self.create_doxygen_target(node) section += self.render_tagged_iterable(node) return [section] @@ -1901,7 +1910,7 @@ def visit_verbatim(self, node: str) -> list[Node]: # consisting of a simple bullet list. 
# For now we just look for an extended embed tag if node.strip().startswith("embed:rst:leading-asterisk"): - lines = node.splitlines() + lines: Iterable[str] = node.splitlines() # Replace the first * on each line with a blank space lines = map(lambda text: text.replace("*", " ", 1), lines) node = "\n".join(lines) @@ -2243,7 +2252,9 @@ def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: ) rst_node = nodes[1] - finder = NodeFinder(rst_node.document) + doc = rst_node.document + assert doc is not None + finder = NodeFinder(doc) rst_node.walk(finder) assert finder.content is not None @@ -2445,7 +2456,7 @@ def visit_templateparam( dom = "cpp" appendDeclName = True if insertDeclNameByParsing: - if dom == "cpp" and sphinx.version_info >= (4, 1, 0): # type: ignore + if dom == "cpp" and sphinx.version_info >= (4, 1, 0): # pyright: ignore parser = cpp.DefinitionParser( "".join(n.astext() for n in nodelist), location=self.state.state_machine.get_source_and_line(), @@ -2467,7 +2478,7 @@ def visit_templateparam( # the actual nodes don't matter, as it is astext()-ed later nodelist = [nodes.Text(str(ast))] appendDeclName = False - except cpp.DefinitionError: + except cpp.DefinitionError: # pyright: ignore # happens with "typename ...Args", so for now, just append pass @@ -2508,7 +2519,7 @@ def visit_templateparamlist(self, node: parser.Node_templateparamlistType) -> li def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: """Parameter/Exception/TemplateParameter documentation""" - has_retval = sphinx.version_info[0:2] < (4, 3) # type: ignore + has_retval = sphinx.version_info[0:2] < (4, 3) # pyright: ignore fieldListName = { parser.DoxParamListKind.param: "param", parser.DoxParamListKind.exception: "throws", @@ -2795,7 +2806,7 @@ def render_tagged(self, item: parser.TaggedValue[str,parser.NodeOrValue] | str) if h is not None: assert self.context is not None with WithContext(self, self.context.create_child_context(item.value, item.name)): - if not self.filter_.allow(self.context.node_stack): return [] + if not self.filter_(NodeStack(self.context.node_stack)): return [] return h(self,item.name, item.value) return self.render(item.value) @@ -2806,7 +2817,7 @@ def render(self, node: parser.NodeOrValue, context: RenderContext | None = None, with WithContext(self, context): assert self.context is not None result: list[Node] = [] - if not self.filter_.allow(self.context.node_stack): + if not self.filter_(NodeStack(self.context.node_stack)): pass else: method = self.node_handlers.get(type(node)) diff --git a/breathe/renderer/target.py b/breathe/renderer/target.py index 77691c0a..a3284790 100644 --- a/breathe/renderer/target.py +++ b/breathe/renderer/target.py @@ -1,9 +1,13 @@ -from breathe.project import ProjectInfo +from __future__ import annotations from docutils import nodes -from docutils.nodes import Element -from typing import Any, Dict, List, Sequence +from typing import Any, TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Mapping, Sequence + from docutils.nodes import Element + from breathe.project import ProjectInfo class TargetHandler: @@ -16,7 +20,7 @@ def __init__(self, project_info: ProjectInfo, document: nodes.document): self.project_info = project_info self.document = document - def create_target(self, refid: str) -> List[Element]: + def create_target(self, refid: str) -> list[Element]: """Creates a target node and registers it with the document and returns it in a list""" target = nodes.target(ids=[refid], names=[refid]) @@ -29,12 
+33,12 @@ def create_target(self, refid: str) -> List[Element]: class _NullTargetHandler(TargetHandler): - def create_target(self, refid: str) -> List[Element]: + def create_target(self, refid: str) -> list[Element]: return [] def create_target_handler( - options: Dict[str, Any], project_info: ProjectInfo, document: nodes.document + options: Mapping[str, Any], project_info: ProjectInfo, document: nodes.document ) -> TargetHandler: if "no-link" in options: return _NullTargetHandler() diff --git a/setup.py b/setup.py index 0b6a187c..515aa882 100644 --- a/setup.py +++ b/setup.py @@ -30,8 +30,8 @@ requires = ["Sphinx>=4.0,!=5.0.0", "docutils>=0.12"] -if sys.version_info < (3, 7): - print("ERROR: Sphinx requires at least Python 3.7 to run.") +if sys.version_info < (3, 8): + print("ERROR: Sphinx requires at least Python 3.8 to run.") sys.exit(1) diff --git a/tests/test_renderer.py b/tests/test_renderer.py index 83117c19..0df7a226 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -6,7 +6,6 @@ import sphinx.environment from breathe import parser, renderer from breathe.renderer.sphinxrenderer import SphinxRenderer -from breathe.renderer.filter import OpenFilter import docutils.parsers.rst from docutils import frontend, nodes, utils from sphinx.testing.fixtures import ( @@ -250,7 +249,7 @@ def render( None, # document MockTargetHandler(), compound_parser, - OpenFilter(), + (lambda nstack: True), ) renderer.context = MockContext(app, [member_def], domain, options) return renderer.render(member_def) From 8b11b893c5ee649940e05611f789728c6f9881f5 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Fri, 1 Dec 2023 00:37:29 -0500 Subject: [PATCH 23/65] Fixed the filters --- breathe/renderer/filter.py | 187 ++++++++++++++++++++++++------------- tests/conftest.py | 31 ++++++ tests/data/classSample.xml | 128 +++++++++++++++++++++++++ tests/test_filters.py | 142 ++++++++++++++++++++++++++++ tests/test_renderer.py | 36 +------ 5 files changed, 424 insertions(+), 100 deletions(-) create mode 100644 tests/conftest.py create mode 100644 tests/data/classSample.xml create mode 100644 tests/test_filters.py diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 97dec5bf..160901bf 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -63,13 +63,17 @@ T_options = TypeVar("T_options", DoxClassOptions, DoxContentBlockOptions) - DoxFilter: TypeAlias = Callable[["NodeStack"],bool] + DoxFilter: TypeAlias = Callable[["NodeStack"], bool] else: DoxClassOptions = None DoxNamespaceOptions = None -CLASS_LIKE_COMPOUNDDEF = (parser.DoxCompoundKind.class_, parser.DoxCompoundKind.struct, parser.DoxCompoundKind.interface) +CLASS_LIKE_COMPOUNDDEF = ( + parser.DoxCompoundKind.class_, + parser.DoxCompoundKind.struct, + parser.DoxCompoundKind.interface, +) class NodeStack: @@ -117,19 +121,10 @@ def location_matches(location: parser.Node_locationType | None, target_file: str def namespace_matches(name: str, node: parser.Node_compounddefType): to_find = name.rpartition("::")[0] return any(to_find == "".join(ns) for ns in node.innernamespace) or any( - to_find == "".join(ns) for ns in node.innernamespace + to_find == "".join(ns) for ns in node.innerclass ) -class Glob: - def __init__(self, method, pattern): - self.method = method - self.pattern = pattern - - def match(self, name): - return self.method(name, self.pattern) - - class FilterFactory: # C++ style public entries public_kinds = set( @@ -152,7 +147,9 @@ def set_defaults(self, options: T_options) -> T_options: 
r.setdefault(m, "") return r - def create_render_filter(self, kind: Literal['group', 'page', 'namespace'], options: DoxContentBlockOptions) -> DoxFilter: + def create_render_filter( + self, kind: Literal["group", "page", "namespace"], options: DoxContentBlockOptions + ) -> DoxFilter: """Render filter for group & namespace blocks""" filter_options = self.set_defaults(options) @@ -166,12 +163,18 @@ def create_render_filter(self, kind: Literal['group', 'page', 'namespace'], opti def filter(nstack: NodeStack) -> bool: grandparent = nstack.ancestor(2) - return ((cm_filter(nstack) or ( - isinstance(grandparent,parser.Node_compounddefType) - and grandparent.kind not in CLASS_LIKE_COMPOUNDDEF - and isinstance(nstack.node, parser.Node_memberdefType))) + return ( + ( + cm_filter(nstack) + or ( + isinstance(grandparent, parser.Node_compounddefType) + and grandparent.kind not in CLASS_LIKE_COMPOUNDDEF + and isinstance(nstack.node, parser.Node_memberdefType) + ) + ) and ic_filter(nstack) - and o_filter(nstack)) + and o_filter(nstack) + ) return filter @@ -185,10 +188,12 @@ def create_class_filter(self, target: str, options: DoxClassOptions) -> DoxFilte o_filter = self.create_outline_filter(filter_options) s_filter = self.create_show_filter(filter_options) - return (lambda nstack: cm_filter(nstack) - and ic_filter(nstack) - and o_filter(nstack) - and s_filter(nstack)) + return ( + lambda nstack: cm_filter(nstack) + and ic_filter(nstack) + and o_filter(nstack) + and s_filter(nstack) + ) @classmethod def create_innerclass_filter( @@ -203,8 +208,10 @@ def create_innerclass_filter( is in. """ allowed: set[parser.DoxProtectionKind] = set() - if "protected-members" in options: allowed.add(parser.DoxProtectionKind.protected) - if "private-members" in options: allowed.add(parser.DoxProtectionKind.private) + if "protected-members" in options: + allowed.add(parser.DoxProtectionKind.protected) + if "private-members" in options: + allowed.add(parser.DoxProtectionKind.private) description = cls._create_description_filter(True, parser.Node_compounddefType) @@ -223,13 +230,17 @@ def filter(nstack: NodeStack) -> bool: node = nstack.node parent = nstack.parent - return (not (isinstance(node,parser.Node_refType) - and nstack.tag == "innerclass" - and isinstance(parent,parser.Node_compounddefType) - and parent.kind in CLASS_LIKE_COMPOUNDDEF) - or node.prot in allowed - or (members is not None and ''.join(node) in members) - or description(nstack)) + return ( + not ( + isinstance(node, parser.Node_refType) + and nstack.tag == "innerclass" + and isinstance(parent, parser.Node_compounddefType) + and parent.kind in CLASS_LIKE_COMPOUNDDEF + ) + or node.prot in allowed + or (members is not None and "".join(node) in members) + or description(nstack) + ) return filter @@ -258,16 +269,21 @@ def _create_description_filter(allow: bool, level: type[parser.Node]) -> DoxFilt if allow: # Let through any description children of sectiondefs if we output any kind members def filter(nstack: NodeStack) -> bool: - return not isinstance(nstack.parent,level) or isinstance(nstack.node,parser.Node_descriptionType) + return not isinstance(nstack.parent, level) or isinstance( + nstack.node, parser.Node_descriptionType + ) + else: # Nothing with a parent that's a sectiondef def filter(nstack: NodeStack) -> bool: - return not isinstance(nstack.parent,level) + return not isinstance(nstack.parent, level) return filter @staticmethod - def _create_public_members_filter(options: DoxNamespaceOptions) -> Callable[[parser.Node_memberdefType],bool]: + def 
_create_public_members_filter( + options: DoxNamespaceOptions, + ) -> Callable[[parser.Node_memberdefType], bool]: if "members" in options: # If the user has specified the 'members' option with arguments then # we only pay attention to that and not to any other member settings @@ -281,11 +297,13 @@ def _create_public_members_filter(options: DoxNamespaceOptions) -> Callable[[par # members list def filter(node: parser.Node_memberdefType) -> bool: return node.name in members + else: # Select anything that doesn't have a parent which is a # sectiondef, or, if it does, only select the public ones def filter(node: parser.Node_memberdefType) -> bool: return node.prot == parser.DoxProtectionKind.public + else: # Nothing with a parent that's a sectiondef def filter(node: parser.Node_memberdefType) -> bool: @@ -337,11 +355,17 @@ def create_class_member_filter(cls, options: DoxNamespaceOptions) -> DoxFilter: # empty, allow the ones with an equal 'prot' attribute def filter(nstack: NodeStack) -> bool: node = nstack.node - return (((not (isinstance(node,parser.Node_memberdefType) and isinstance(nstack.parent,parser.Node_sectiondefType)) - or (bool(prot_filter) and node.prot in prot_filter) - or public_members(node)) - and undoc_members(nstack)) - or description(nstack)) + return ( + ( + not ( + isinstance(node, parser.Node_memberdefType) + and isinstance(nstack.parent, parser.Node_sectiondefType) + ) + or (bool(prot_filter) and node.prot in prot_filter) + or public_members(node) + ) + and undoc_members(nstack) + ) or description(nstack) return filter @@ -355,8 +379,16 @@ def create_outline_filter(options: Mapping[str, Any]) -> DoxFilter: return lambda nstack: True @classmethod - def create_file_filter(cls, filename: str, options: Mapping[str, Any]) -> DoxFilter: + def create_file_filter( + cls, + filename: str, + options: Mapping[str, Any], + *, + init_valid_names: Iterable[str] | None = None, + ) -> DoxFilter: valid_names: set[str] = set() + if init_valid_names: + valid_names.update(init_valid_names) outline_filter = cls.create_outline_filter(options) @@ -420,7 +452,9 @@ def filter(nstack: NodeStack) -> bool: return filter @staticmethod - def create_content_filter(kind: Literal['group', 'page', 'namespace'], options: Mapping[str, Any]) -> DoxFilter: + def create_content_filter( + kind: Literal["group", "page", "namespace"], options: Mapping[str, Any] + ) -> DoxFilter: """Returns a filter which matches the contents of the or namespace but not the group or namepace name or description. 
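[Editor's illustration — not part of the patch.] The combinator classes removed earlier in this series (AndFilter, OrFilter, NotFilter) reduce to ordinary closures under the new scheme, where a filter is just a callable taking a NodeStack and returning bool (the DoxFilter alias added above). A minimal sketch, assuming only NodeStack from breathe.renderer.filter; the helper names and_, or_ and not_ are invented here for illustration and do not exist in Breathe:

    from typing import Callable

    from breathe.renderer.filter import NodeStack

    # The runtime equivalent of the DoxFilter type alias used in the patch.
    DoxFilter = Callable[[NodeStack], bool]

    def and_(*filters: DoxFilter) -> DoxFilter:
        # Rough equivalent of the removed AndFilter: every child filter must accept the node.
        return lambda nstack: all(f(nstack) for f in filters)

    def or_(*filters: DoxFilter) -> DoxFilter:
        # Rough equivalent of the removed OrFilter: any child filter may accept the node.
        return lambda nstack: any(f(nstack) for f in filters)

    def not_(f: DoxFilter) -> DoxFilter:
        # Rough equivalent of the removed NotFilter.
        return lambda nstack: not f(nstack)

This is also why the FilterFactory methods in this patch can simply combine the filters they build with plain `and`/`or` inside a nested function or lambda, instead of constructing filter objects.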
@@ -434,14 +468,16 @@ def filter(nstack: NodeStack) -> bool: node = nstack.node parent = nstack.parent - if isinstance(node,parser.Node_memberdefType): + if isinstance(node, parser.Node_memberdefType): return node.prot == parser.DoxProtectionKind.public - - return (isinstance(node,parser.Node_refType) - and isinstance(parent,parser.Node_compounddefType) - and parent.kind.value == kind - and nstack.tag == 'innerclass' - and node.prot == parser.DoxProtectionKind.public) + + return ( + isinstance(node, parser.Node_refType) + and isinstance(parent, parser.Node_compounddefType) + and parent.kind.value == kind + and nstack.tag == "innerclass" + and node.prot == parser.DoxProtectionKind.public + ) return filter @@ -484,51 +520,68 @@ def filter(nstack: NodeStack) -> bool: return filter - def create_member_finder_filter(self, namespace: str, name: str, kinds: Container[parser.MemberKind] | str) -> DoxFilter: + def create_member_finder_filter( + self, namespace: str, name: str, kinds: Container[parser.MemberKind] | str + ) -> DoxFilter: """Returns a filter which looks for a member with the specified name and kind.""" - if isinstance(kinds,str): + if isinstance(kinds, str): kinds = (parser.MemberKind(kinds),) def node_matches(nstack: NodeStack) -> bool: node = nstack.node - return (isinstance(node,parser.Node_MemberType) - and node.kind in kinds - and node.name == name) + return ( + isinstance(node, parser.Node_MemberType) + and node.kind in kinds + and node.name == name + ) if namespace: + def filter(nstack: NodeStack) -> bool: parent = nstack.parent - return (node_matches(nstack) - and isinstance(parent,parser.Node_CompoundType) - and parent.kind in {parser.CompoundKind.namespace, - parser.CompoundKind.class_, - parser.CompoundKind.struct, - parser.CompoundKind.interface} - and parent.name == namespace) + return ( + node_matches(nstack) + and isinstance(parent, parser.Node_CompoundType) + and parent.kind + in { + parser.CompoundKind.namespace, + parser.CompoundKind.class_, + parser.CompoundKind.struct, + parser.CompoundKind.interface, + } + and parent.name == namespace + ) + else: ext = self.app.config.breathe_implementation_filename_extensions def filter(nstack: NodeStack) -> bool: parent = nstack.parent - return (isinstance(parent,parser.Node_CompoundType) - and (parent.kind != parser.CompoundKind.file or parent.name.endswith(ext))) + return isinstance(parent, parser.Node_CompoundType) and ( + parent.kind != parser.CompoundKind.file or parent.name.endswith(ext) + ) return filter def create_function_and_all_friend_finder_filter(self, namespace: str, name: str) -> DoxFilter: - fun_finder = self.create_member_finder_filter(namespace, name, (parser.MemberKind.function, parser.MemberKind.friend)) + fun_finder = self.create_member_finder_filter( + namespace, name, (parser.MemberKind.function, parser.MemberKind.friend) + ) # Get matching functions but only ones where the parent is not a group. # We want to skip function entries in groups as we'll find the same # functions in a file's xml output elsewhere and having more than one - # match is confusing for our logic later on. + # match is confusing for our logic later on. 
def filter(nstack: NodeStack) -> bool: - if not fun_finder(nstack): return False + if not fun_finder(nstack): + return False parent = nstack.parent - return not (isinstance(parent, parser.Node_CompoundType) - and parent.kind == parser.CompoundKind.group) + return not ( + isinstance(parent, parser.Node_CompoundType) + and parent.kind == parser.CompoundKind.group + ) return filter diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..b620dca5 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,31 @@ +import pytest +from sphinx.testing.fixtures import ( + test_params, + app_params, + make_app, + shared_result, + sphinx_test_tempdir, + rootdir, +) + +@pytest.fixture(scope="function") +def app(test_params, app_params, make_app, shared_result): + """ + Based on sphinx.testing.fixtures.app + """ + args, kwargs = app_params + assert "srcdir" in kwargs + kwargs["srcdir"].mkdir(parents=True,exist_ok=True) + (kwargs["srcdir"] / "conf.py").write_text("") + app_ = make_app(*args, **kwargs) + yield app_ + + print("# testroot:", kwargs.get("testroot", "root")) + print("# builder:", app_.builder.name) + print("# srcdir:", app_.srcdir) + print("# outdir:", app_.outdir) + print("# status:", "\n" + app_._status.getvalue()) + print("# warning:", "\n" + app_._warning.getvalue()) + + if test_params["shared_result"]: + shared_result.store(test_params["shared_result"], app_) \ No newline at end of file diff --git a/tests/data/classSample.xml b/tests/data/classSample.xml new file mode 100644 index 00000000..47befad9 --- /dev/null +++ b/tests/data/classSample.xml @@ -0,0 +1,128 @@ + + + + Sample + sample.hpp + + + int + int public_field + + public_field + Sample::public_field + + + + + + + + + + + + int + int protected_field + + protected_field + Sample::protected_field + + + + + + + + + + + + int + int private_field + + private_field + Sample::private_field + + + + + + + + + + + + int + int public_method + (int x) + public_method + Sample::public_method + + int + x + + + + + + + + + + + + + int + int protected_method + (int x) + protected_method + Sample::protected_method + + int + x + + + + + + + + + + + + + int + int private_method + (int x) + private_method + Sample::private_method + + int + x + + + + + + + + + + + + + + + + + Sampleprivate_field + Sampleprivate_method + Sampleprotected_field + Sampleprotected_method + Samplepublic_field + Samplepublic_method + + + diff --git a/tests/test_filters.py b/tests/test_filters.py new file mode 100644 index 00000000..38dbfb36 --- /dev/null +++ b/tests/test_filters.py @@ -0,0 +1,142 @@ +from __future__ import annotations + +import os.path +import pytest +from typing import NamedTuple + +from breathe import parser +from breathe.renderer import TaggedNode +from breathe.renderer.filter import FilterFactory, NodeStack + + +DEFAULT_OPTS = opts = { + 'path': '', + 'project': '', + 'membergroups': '', + 'show': '', + 'undoc-members': None} + +@pytest.fixture(scope="module") +def class_doc(): + with open(os.path.join(os.path.dirname(__file__), "data", "classSample.xml"), "rb") as fid: + return parser.parse_file(fid).value + +class SampleMembers(NamedTuple): + public_field: NodeStack + public_method: NodeStack + protected_field: NodeStack + protected_method: NodeStack + private_field: NodeStack + private_method: NodeStack + +@pytest.fixture +def members(class_doc): + common = [ + TaggedNode(None, class_doc.compounddef[0]), + TaggedNode(None, class_doc) + ] + + memberdefs = {} + + for sect in class_doc.compounddef[0].sectiondef: + member = 
sect.memberdef[0] + memberdefs[member.name] = NodeStack([TaggedNode(None, member), TaggedNode(None, sect)] + common) + + return SampleMembers( + memberdefs['public_field'], + memberdefs['public_method'], + memberdefs['protected_field'], + memberdefs['protected_method'], + memberdefs['private_field'], + memberdefs['private_method'] + ) + +def create_class_filter(app, extra_ops): + opts = DEFAULT_OPTS.copy() + opts.update(extra_ops) + return FilterFactory(app).create_class_filter('Sample', opts) + +def test_no_class_members(app, members): + app.config.breathe_default_members = [] + + filter = create_class_filter(app,{}) + + assert not filter(members.public_field) + assert not filter(members.public_method) + assert not filter(members.protected_field) + assert not filter(members.protected_method) + assert not filter(members.private_field) + assert not filter(members.private_method) + +def test_public_class_members(app, members): + app.config.breathe_default_members = [] + + filter = create_class_filter(app,{'members': ''}) + + assert filter(members.public_field) + assert filter(members.public_method) + assert not filter(members.protected_field) + assert not filter(members.protected_method) + assert not filter(members.private_field) + assert not filter(members.private_method) + +def test_prot_class_members(app, members): + app.config.breathe_default_members = [] + + filter = create_class_filter(app,{ + 'members': '', + 'protected-members': None}) + + assert filter(members.public_field) + assert filter(members.public_method) + assert filter(members.protected_field) + assert filter(members.protected_method) + assert not filter(members.private_field) + assert not filter(members.private_method) + +def test_all_class_members(app, members): + app.config.breathe_default_members = [] + + filter = create_class_filter(app,{ + 'members': '', + 'protected-members': None, + 'private-members': None}) + + assert filter(members.public_field) + assert filter(members.public_method) + assert filter(members.protected_field) + assert filter(members.protected_method) + assert filter(members.private_field) + assert filter(members.private_method) + +def test_specific_class_members(app, members): + app.config.breathe_default_members = [] + + filter = create_class_filter(app,{ + 'members': 'public_method,protected_method,private_field'}) + + assert not filter(members.public_field) + assert filter(members.public_method) + assert not filter(members.protected_field) + assert filter(members.protected_method) + assert filter(members.private_field) + assert not filter(members.private_method) + +def test_nested_class_filtered(app): + app.config.breathe_default_members = [] + + doc = parser.parse_str(""" + + sample.hpp + Sample + Sample::Inner + + + """) + + compounddef = doc.value.compounddef[0] + ref_outer, ref_inner = compounddef.innerclass + + filter = FilterFactory(app).create_file_filter('sample.hpp', DEFAULT_OPTS, init_valid_names=('Sample','Sample::Inner')) + assert filter(NodeStack([TaggedNode('innerclass',ref_outer), TaggedNode(None, compounddef)])) + assert not filter(NodeStack([TaggedNode('innerclass',ref_inner), TaggedNode(None, compounddef)])) diff --git a/tests/test_renderer.py b/tests/test_renderer.py index 0df7a226..20ba51b7 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -8,14 +8,7 @@ from breathe.renderer.sphinxrenderer import SphinxRenderer import docutils.parsers.rst from docutils import frontend, nodes, utils -from sphinx.testing.fixtures import ( - test_params, - app_params, - 
make_app,
-    shared_result,
-    sphinx_test_tempdir,
-    rootdir,
-)
+
 
 sphinx.locale.init([], "")
 
 COMMON_ARGS_memberdefType = {
@@ -25,28 +18,6 @@
     'location': parser.Node_locationType(file = '', line = 0)
 }
 
-@pytest.fixture(scope="function")
-def app(test_params, app_params, make_app, shared_result):
-    """
-    Based on sphinx.testing.fixtures.app
-    """
-    args, kwargs = app_params
-    assert "srcdir" in kwargs
-    kwargs["srcdir"].mkdir(parents=True,exist_ok=True)
-    (kwargs["srcdir"] / "conf.py").write_text("")
-    app_ = make_app(*args, **kwargs)
-    yield app_
-
-    print("# testroot:", kwargs.get("testroot", "root"))
-    print("# builder:", app_.builder.name)
-    print("# srcdir:", app_.srcdir)
-    print("# outdir:", app_.outdir)
-    print("# status:", "\n" + app_._status.getvalue())
-    print("# warning:", "\n" + app_._warning.getvalue())
-
-    if test_params["shared_result"]:
-        shared_result.store(test_params["shared_result"], app_)
-
 
 class MockMemo:
     def __init__(self):
@@ -508,9 +479,8 @@ def get_directive(app):
 
 def get_matches(datafile) -> tuple[list[str], list[list[renderer.TaggedNode]]]:
     argsstrings = []
-    with open(os.path.join(os.path.dirname(__file__), "data", datafile)) as fid:
-        xml = fid.read()
-    doc = parser.parse_str(xml)
+    with open(os.path.join(os.path.dirname(__file__), "data", datafile), 'rb') as fid:
+        doc = parser.parse_file(fid)
     assert isinstance(doc.value,parser.Node_DoxygenType)
 
     sectiondef = doc.value.compounddef[0].sectiondef[0]

From a670b76344e7e741cc2d7ec2cf6cac038393ffea Mon Sep 17 00:00:00 2001
From: Rouslan Korneychuk
Date: Mon, 4 Dec 2023 20:46:24 -0500
Subject: [PATCH 24/65] Small fixes and new tests

---
 breathe/directives/class_like.py | 6 +-
 breathe/directives/content_block.py | 20 +-
 breathe/directives/function.py | 8 +-
 breathe/directives/index.py | 2 +
 breathe/directives/item.py | 23 +-
 breathe/filetypes.py | 2 +
 breathe/finder/factory.py | 11 +-
 breathe/finder/index.py | 2 +-
 breathe/project.py | 12 +-
 breathe/renderer/filter.py | 17 +-
 breathe/renderer/sphinxrenderer.py | 24 +-
 setup.cfg | 1 +
 setup.py | 49 +++-
 tests/data/examples/test_array/compare.xml | 17 ++
 tests/data/examples/test_array/input.rst | 2 +
 tests/data/examples/test_c_enum/compare.xml | 139 ++++++++++
 tests/data/examples/test_c_enum/input.rst | 11 +
 tests/data/examples/test_class/compare.xml | 254 ++++++++++++++++++
 tests/data/examples/test_class/input.rst | 1 +
 .../examples/test_code_blocks/compare.xml | 36 +++
 tests/data/examples/test_code_blocks/input.rst | 1 +
 .../examples/test_cpp_concept/compare.xml | 9 +
 .../data/examples/test_cpp_concept/input.rst | 1 +
 tests/data/examples/test_cpp_enum/compare.xml | 48 ++++
 tests/data/examples/test_cpp_enum/input.rst | 4 +
 tests/data/examples/test_define/compare.xml | 49 ++++
 tests/data/examples/test_define/input.rst | 4 +
 .../data/examples/test_dot_graphs/compare.xml | 17 ++
 tests/data/examples/test_dot_graphs/input.rst | 1 +
 tests/data/examples/test_headings/compare.xml | 15 ++
 tests/data/examples/test_headings/input.rst | 1 +
 tests/data/examples/test_rst/compare.xml | 76 ++++++
 tests/data/examples/test_rst/input.rst | 2 +
 .../test_template_class_non_type/compare.xml | 78 ++++++
 .../test_template_class_non_type/input.rst | 2 +
 tests/test_examples.py | 192 +++++++++++++
 tests/test_renderer.py | 6 +-
 xml_parser_generator/schema.json | 28 +-
 38 files changed, 1109 insertions(+), 62 deletions(-)
 create mode 100644 tests/data/examples/test_array/compare.xml
 create mode 100644 tests/data/examples/test_array/input.rst
 create mode 100644 tests/data/examples/test_c_enum/compare.xml
 create mode 100644 tests/data/examples/test_c_enum/input.rst
 create mode 100644 tests/data/examples/test_class/compare.xml
 create mode 100644 tests/data/examples/test_class/input.rst
 create mode 100644 tests/data/examples/test_code_blocks/compare.xml
 create mode 100644 tests/data/examples/test_code_blocks/input.rst
 create mode 100644 tests/data/examples/test_cpp_concept/compare.xml
 create mode 100644 tests/data/examples/test_cpp_concept/input.rst
 create mode 100644 tests/data/examples/test_cpp_enum/compare.xml
 create mode 100644 tests/data/examples/test_cpp_enum/input.rst
 create mode 100644 tests/data/examples/test_define/compare.xml
 create mode 100644 tests/data/examples/test_define/input.rst
 create mode 100644 tests/data/examples/test_dot_graphs/compare.xml
 create mode 100644 tests/data/examples/test_dot_graphs/input.rst
 create mode 100644 tests/data/examples/test_headings/compare.xml
 create mode 100644 tests/data/examples/test_headings/input.rst
 create mode 100644 tests/data/examples/test_rst/compare.xml
 create mode 100644 tests/data/examples/test_rst/input.rst
 create mode 100644 tests/data/examples/test_template_class_non_type/compare.xml
 create mode 100644 tests/data/examples/test_template_class_non_type/input.rst
 create mode 100644 tests/test_examples.py

diff --git a/breathe/directives/class_like.py b/breathe/directives/class_like.py
index 1ccb8756..0ba259f5 100644
--- a/breathe/directives/class_like.py
+++ b/breathe/directives/class_like.py
@@ -11,7 +11,11 @@
 from typing import cast, ClassVar, TYPE_CHECKING
 
 if TYPE_CHECKING:
-    from typing_extensions import NotRequired, TypedDict
+    import sys
+    if sys.version_info >= (3, 11):
+        from typing import NotRequired, TypedDict
+    else:
+        from typing_extensions import NotRequired, TypedDict
 
     from breathe import renderer
     from docutils.nodes import Node
diff --git a/breathe/directives/content_block.py b/breathe/directives/content_block.py
index be6ebd23..42a5f204 100644
--- a/breathe/directives/content_block.py
+++ b/breathe/directives/content_block.py
@@ -15,7 +15,13 @@
 from typing import Any, cast, ClassVar, Literal, TYPE_CHECKING
 
 if TYPE_CHECKING:
-    from typing_extensions import NotRequired, TypedDict
+    import sys
+    if sys.version_info >= (3, 11):
+        from typing import NotRequired, TypedDict
+    else:
+        from typing_extensions import NotRequired, TypedDict
+    from breathe.renderer import TaggedNode
+    from breathe.finder.factory import FinderRoot
 
 DoxContentBlockOptions = TypedDict('DoxContentBlockOptions',{
     'path': str,
@@ -32,6 +38,7 @@
     'sort': NotRequired[None]})
 else:
     DoxContentBlockOptions = None
+    FinderRoot = None
 
 
 class _DoxygenContentBlockDirective(BaseDirective):
@@ -74,8 +81,7 @@ def run(self) -> list[Node]:
 
         finder_filter = self.filter_factory.create_finder_filter(self.kind, name)
 
-        # TODO: find a more specific type for the Doxygen nodes
-        matches: list[Any] = []
+        matches: list[list[TaggedNode]] = []
         finder.filter_(finder_filter, matches)
 
         # It shouldn't be possible to have too many matches as namespaces & groups in their nature
@@ -92,10 +98,10 @@ def run(self) -> list[Node]:
             # Having found the compound node for the namespace or group in the index we want to grab
             # the contents of it which match the filter
             contents_finder = self.finder_factory.create_finder_from_root(
-                node_stack[0], project_info
+                cast(FinderRoot,node_stack[0].value), project_info
             )
-            # TODO: find a more specific type for the Doxygen nodes
-            contents: list[Any] = []
+
+            contents: list[list[TaggedNode]] = []
contents_finder.filter_(filter_, contents) # Replaces matches with our new starting points @@ -109,7 +115,7 @@ def run(self) -> list[Node]: object_renderer = SphinxRenderer( self.parser_factory.app, project_info, - node_stack, + [item.value for item in node_stack], self.state, self.state.document, target_handler, diff --git a/breathe/directives/function.py b/breathe/directives/function.py index 04371649..8b0b29c4 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -21,7 +21,11 @@ from typing import cast, List, Optional, TYPE_CHECKING if TYPE_CHECKING: - from typing_extensions import NotRequired, TypedDict + import sys + if sys.version_info >= (3, 11): + from typing import NotRequired, TypedDict + else: + from typing_extensions import NotRequired, TypedDict from breathe import project from docutils.nodes import Node @@ -118,7 +122,7 @@ def run(self) -> List[Node]: # only take functions and friend functions # ignore friend classes node = m[0].value - assert isinstance(node, parser.Node_MemberType) + assert isinstance(node, parser.Node_memberdefType) if node.kind == "friend" and not node.argsstring: continue matches.append(m) diff --git a/breathe/directives/index.py b/breathe/directives/index.py index 029f7e4d..6c4952c8 100644 --- a/breathe/directives/index.py +++ b/breathe/directives/index.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from breathe.directives import BaseDirective from breathe import parser from breathe.project import ProjectError diff --git a/breathe/directives/item.py b/breathe/directives/item.py index c6cb0fc5..56ec1ba0 100644 --- a/breathe/directives/item.py +++ b/breathe/directives/item.py @@ -10,11 +10,18 @@ from docutils.parsers.rst.directives import unchanged_required, flag -from typing import Any, cast, ClassVar, TYPE_CHECKING +from typing import cast, ClassVar, TYPE_CHECKING if TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 11): + from typing import NotRequired, TypedDict + else: + from typing_extensions import NotRequired, TypedDict + + from breathe.renderer import TaggedNode from breathe.renderer.filter import DoxFilter - from typing_extensions import NotRequired, TypedDict DoxBaseItemOptions = TypedDict('DoxBaseItemOptions',{ 'path': str, @@ -44,12 +51,9 @@ def create_finder_filter(self, namespace: str, name: str) -> DoxFilter: return self.filter_factory.create_member_finder_filter(namespace, name, self.kind) def run(self) -> list[Node]: - options = cast(DoxBaseItemOptions,self.options) - - try: - namespace, name = self.arguments[0].rsplit("::", 1) - except ValueError: - namespace, name = "", self.arguments[0] + options = cast(DoxBaseItemOptions, self.options) + + namespace, _, name = self.arguments[0].rpartition("::") try: project_info = self.project_info_factory.create_project_info(options) @@ -65,8 +69,7 @@ def run(self) -> list[Node]: finder_filter = self.create_finder_filter(namespace, name) - # TODO: find a more specific type for the Doxygen nodes - matches: list[Any] = [] + matches: list[list[TaggedNode]] = [] finder.filter_(finder_filter, matches) if len(matches) == 0: diff --git a/breathe/filetypes.py b/breathe/filetypes.py index 123fe221..803b77fc 100644 --- a/breathe/filetypes.py +++ b/breathe/filetypes.py @@ -2,6 +2,8 @@ A module to house the methods for resolving a code-blocks language based on filename (and extension). 
""" +from __future__ import annotations + import os.path from pygments.lexers import get_lexer_for_filename diff --git a/breathe/finder/factory.py b/breathe/finder/factory.py index bf187e47..86da4f7a 100644 --- a/breathe/finder/factory.py +++ b/breathe/finder/factory.py @@ -15,6 +15,15 @@ from breathe.renderer.filter import DoxFilter ItemFinderCreator = Callable[[ProjectInfo,Any,'DoxygenItemFinderFactory'],ItemFinder] + FinderRoot = (parser.Node_DoxygenTypeIndex + | parser.Node_CompoundType + | parser.Node_MemberType + | parser.Node_DoxygenType + | parser.Node_compounddefType + | parser.Node_sectiondefType + | parser.Node_memberdefType + | parser.Node_refType) + class _CreateCompoundTypeSubFinder: def __init__(self, app: Sphinx, parser_factory: parser.DoxygenParserFactory): @@ -60,7 +69,7 @@ def create_finder(self, project_info: ProjectInfo) -> Finder: root = self.parser.parse(project_info) return self.create_finder_from_root(root, project_info) - def create_finder_from_root(self, root, project_info: ProjectInfo) -> Finder: + def create_finder_from_root(self, root: FinderRoot, project_info: ProjectInfo) -> Finder: finders: dict[type[parser.NodeOrValue], ItemFinderCreator] = { parser.Node_DoxygenTypeIndex: indexfinder.DoxygenTypeSubItemFinder, parser.Node_CompoundType: _CreateCompoundTypeSubFinder(self.app, self.parser_factory), diff --git a/breathe/finder/index.py b/breathe/finder/index.py index 12b65bb8..9b690946 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -60,7 +60,7 @@ def filter_(self, ancestors: list[TaggedNode], filter_: DoxFilter, matches) -> N finder = self.item_finder_factory.create_finder(file_data) for member_stack in member_matches: - refid = member_stack[0].refid + refid = member_stack[0].value.refid def ref_filter(nstack): node = nstack.node return isinstance(node,parser.Node_memberdefType) and node.id == refid diff --git a/breathe/project.py b/breathe/project.py index 89127427..6cd2a463 100644 --- a/breathe/project.py +++ b/breathe/project.py @@ -5,13 +5,18 @@ from sphinx.application import Sphinx import os +import os.path import fnmatch from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing_extensions import TypedDict + import sys + if sys.version_info >= (3, 11): + from typing import TypedDict + else: + from typing_extensions import TypedDict ProjectOptions = TypedDict('ProjectOptions',{ 'path': str, @@ -122,7 +127,10 @@ def __init__(self, app: Sphinx): # Assume general build directory is the doctree directory without the last component. # We strip off any trailing slashes so that dirname correctly drops the last part. 
# This can be overridden with the breathe_build_directory config variable - self._default_build_dir = str(app.doctreedir.parent) + if isinstance(app.doctreedir, str): + self._default_build_dir = os.path.dirname(app.doctreedir.rstrip(os.sep)) + else: + self._default_build_dir = str(app.doctreedir.parent) self.project_count = 0 self.project_info_store: dict[str, ProjectInfo] = {} self.project_info_for_auto_store: dict[str, AutoProjectInfo] = {} diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 160901bf..3114ebbe 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -50,11 +50,16 @@ from sphinx.application import Sphinx import os -from typing import Any, Callable, Literal, SupportsIndex, TYPE_CHECKING +from typing import Any, Callable, Literal, SupportsIndex, TYPE_CHECKING, TypeVar from collections.abc import Container, Iterable, Mapping if TYPE_CHECKING: - from typing_extensions import TypeAlias, TypeVar + import sys + + if sys.version_info >= (3, 11): + from typing import TypeAlias + else: + from typing_extensions import TypeAlias from breathe import renderer from breathe.directives.class_like import DoxClassOptions from breathe.directives.content_block import DoxContentBlockOptions @@ -554,12 +559,14 @@ def filter(nstack: NodeStack) -> bool: ) else: - ext = self.app.config.breathe_implementation_filename_extensions + ext = tuple(self.app.config.breathe_implementation_filename_extensions) def filter(nstack: NodeStack) -> bool: parent = nstack.parent - return isinstance(parent, parser.Node_CompoundType) and ( - parent.kind != parser.CompoundKind.file or parent.name.endswith(ext) + return ( + node_matches(nstack) + and isinstance(parent, parser.Node_CompoundType) + and (parent.kind != parser.CompoundKind.file or not parent.name.endswith(ext)) ) return filter diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index 166b396e..cd4d084e 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -680,11 +680,11 @@ def get_filename(node) -> Optional[str]: assert self.context is not None node_stack = self.context.node_stack - node = node_stack[0] + node = node_stack[0].value # An enumvalueType node doesn't have location, so use its parent node # for detecting the domain instead. 
if isinstance(node, (str,parser.Node_enumvalueType)): - node = node_stack[1] + node = node_stack[1].value filename = get_filename(node) if not filename and isinstance(node,parser.Node_CompoundType): file_data = self.compound_parser.parse(node.refid) @@ -1788,31 +1788,31 @@ def visit_docsimplesect(self, node: parser.Node_docSimpleSectType) -> list[Node] # and it will be pulled up later nodelist = self.render_iterable(node.para) - if node.type in (parser.DoxSimpleSectKind.pre, parser.DoxSimpleSectKind.post, parser.DoxSimpleSectKind.return_): + if node.kind in (parser.DoxSimpleSectKind.pre, parser.DoxSimpleSectKind.post, parser.DoxSimpleSectKind.return_): return [ nodes.field_list( "", nodes.field( "", - nodes.field_name("", nodes.Text(node.type.value)), + nodes.field_name("", nodes.Text(node.kind.value)), nodes.field_body("", *nodelist), ), ) ] - elif node.type == parser.DoxSimpleSectKind.warning: + elif node.kind == parser.DoxSimpleSectKind.warning: return [nodes.warning("", *nodelist)] - elif node.type == parser.DoxSimpleSectKind.note: + elif node.kind == parser.DoxSimpleSectKind.note: return [nodes.note("", *nodelist)] - elif node.type == parser.DoxSimpleSectKind.see: + elif node.kind == parser.DoxSimpleSectKind.see: return [addnodes.seealso("", *nodelist)] - elif node.type == parser.DoxSimpleSectKind.remark: - nodelist.insert(0, nodes.title("", nodes.Text(node.type.value.capitalize()))) - return [nodes.admonition("", classes=[node.type.value], *nodelist)] + elif node.kind == parser.DoxSimpleSectKind.remark: + nodelist.insert(0, nodes.title("", nodes.Text(node.kind.value.capitalize()))) + return [nodes.admonition("", classes=[node.kind.value], *nodelist)] - if node.type == parser.DoxSimpleSectKind.par: + if node.kind == parser.DoxSimpleSectKind.par: text = self.render(node.title) else: - text = [nodes.Text(node.type.value.capitalize())] + text = [nodes.Text(node.kind.value.capitalize())] # TODO: is this working as intended? 
there is something strange with the types title = nodes.strong("", "", *text) diff --git a/setup.cfg b/setup.cfg index 88eee913..44b9b1a6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -9,4 +9,5 @@ per-file-ignores = breathe/parser/index.py:E305 [bdist_wheel] +py-limited-api = cp38 universal = 0 diff --git a/setup.py b/setup.py index 515aa882..855ed11a 100644 --- a/setup.py +++ b/setup.py @@ -9,10 +9,12 @@ import sys import os.path +from setuptools.command.build import build from setuptools.command.build_ext import build_ext from distutils import log from distutils.dep_util import newer_group from distutils.dir_util import mkpath +from distutils.util import split_quoted # add xml_parser_generator to the import path list base_dir = os.path.dirname(os.path.realpath(__file__)) @@ -31,12 +33,35 @@ requires = ["Sphinx>=4.0,!=5.0.0", "docutils>=0.12"] if sys.version_info < (3, 8): - print("ERROR: Sphinx requires at least Python 3.8 to run.") + print("ERROR: Breathe requires at least Python 3.8 to run.") sys.exit(1) +extra_user_options = [ + ('cpp-opts=',None, + 'extra command line arguments for the compiler'), + ('ld-opts=',None, + 'extra command line arguments for the linker')] + +class CustomBuild(build): + """Add extra parameters for 'build' to pass to 'build_ext' + """ + user_options = build.user_options + extra_user_options + + def initialize_options(self): + super().initialize_options() + self.cpp_opts = '' + self.ld_opts = '' + + def finalize_options(self): + super().finalize_options() + self.cpp_opts = split_quoted(self.cpp_opts) + self.ld_opts = split_quoted(self.ld_opts) + class CustomBuildExt(build_ext): - """Extend build_ext to automatically generate parser.c""" + """Extend build_ext to automatically generate _parser.c""" + + user_options = build_ext.user_options + extra_user_options SCHEMA_FILE = os.path.join('xml_parser_generator','schema.json') MODULE_TEMPLATE = os.path.join('xml_parser_generator','module_template.c') @@ -45,6 +70,22 @@ class CustomBuildExt(build_ext): DEPENDENCIES = [SCHEMA_FILE,MODULE_TEMPLATE,STUBS_TEMPLATE,MAKER_SOURCE] + def initialize_options(self): + super().initialize_options() + self.cpp_opts = None + self.ld_opts = None + + def finalize_options(self): + if self.cpp_opts is not None: + self.cpp_opts = split_quoted(self.cpp_opts) + if self.ld_opts is not None: + self.ld_opts = split_quoted(self.ld_opts) + + self.set_undefined_options('build', + ('cpp_opts','cpp_opts'), + ('ld_opts','ld_opts')) + super().finalize_options() + def build_extensions(self): assert len(self.extensions) == 1 @@ -122,7 +163,7 @@ def build_extensions(self): depends=CustomBuildExt.DEPENDENCIES, libraries=['expat'], define_macros=[ - ('PARSER_PY_LIMITED_API','0x03070000'), # set Stable ABI version to 3.7 + ('PARSER_PY_LIMITED_API','0x03080000'), # set Stable ABI version to 3.8 ('MODULE_NAME','_parser'), ('FULL_MODULE_STR','"breathe._parser"') ], @@ -136,5 +177,5 @@ def build_extensions(self): ], }, install_requires=requires, - cmdclass={'build_ext': CustomBuildExt} + cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt} ) diff --git a/tests/data/examples/test_array/compare.xml b/tests/data/examples/test_array/compare.xml new file mode 100644 index 00000000..ab1001bb --- /dev/null +++ b/tests/data/examples/test_array/compare.xml @@ -0,0 +1,17 @@ + + + + + int fooint a[5] + + My function. + + + + int barint nint a[] + + My other function. 
+ Test:This declaration is supposed to be int bar(int n, int a[static n]); But, Sphinx fails to recognize int a[static n]) as a C specific array syntax + + + diff --git a/tests/data/examples/test_array/input.rst b/tests/data/examples/test_array/input.rst new file mode 100644 index 00000000..52e1cc66 --- /dev/null +++ b/tests/data/examples/test_array/input.rst @@ -0,0 +1,2 @@ +.. doxygenfunction:: foo +.. doxygenfunction:: bar diff --git a/tests/data/examples/test_c_enum/compare.xml b/tests/data/examples/test_c_enum/compare.xml new file mode 100644 index 00000000..99981e89 --- /dev/null +++ b/tests/data/examples/test_c_enum/compare.xml @@ -0,0 +1,139 @@ + + + + + enum GSM_BackupFormat + + Backup data. + Values: + + enumerator GSM_Backup_Auto + + Compatibility with old gboolean used instead of format. + File type is guessed for extension, non unicode format used for Gammu backup. + + + + enumerator GSM_Backup_AutoUnicode + + Compatibility with old gboolean used instead of format. + File type is guessed for extension, unicode format used for Gammu backup. + + + + enumerator GSM_Backup_LMB + + LMB format, compatible with Logo manager, can store phonebooks and logos. + + + + enumerator GSM_Backup_VCalendar + + vCalendar standard, can store todo and calendar entries. + + + + enumerator GSM_Backup_VCard + + vCard standard, can store phone phonebook entries. + + + + enumerator GSM_Backup_LDIF + + LDIF (LDAP Data Interchange Format), can store phone phonebook entries. + + + + enumerator GSM_Backup_ICS + + iCalendar standard, can store todo and calendar entries. + + + + enumerator GSM_Backup_Gammu + + Gammu own format can store almost anything from phone. + This is ASCII version of the format, Unicode strings are HEX encoded. Use GSM_Backup_GammuUCS2 instead if possible. + + + + enumerator GSM_Backup_GammuUCS2 + + Gammu own format can store almost anything from phone. + This is UCS2-BE version of the format. + + + + enumerator GSM_Backup_VNote + + vNote standard, can store phone notes. + + + + + + enumerator GSM_Backup_Auto + + Compatibility with old gboolean used instead of format. + File type is guessed for extension, non unicode format used for Gammu backup. + + + + enumerator GSM_Backup_AutoUnicode + + Compatibility with old gboolean used instead of format. + File type is guessed for extension, unicode format used for Gammu backup. + + + + enumerator GSM_Backup_LMB + + LMB format, compatible with Logo manager, can store phonebooks and logos. + + + + enumerator GSM_Backup_VCalendar + + vCalendar standard, can store todo and calendar entries. + + + + enumerator GSM_Backup_VCard + + vCard standard, can store phone phonebook entries. + + + + enumerator GSM_Backup_LDIF + + LDIF (LDAP Data Interchange Format), can store phone phonebook entries. + + + + enumerator GSM_Backup_ICS + + iCalendar standard, can store todo and calendar entries. + + + + enumerator GSM_Backup_Gammu + + Gammu own format can store almost anything from phone. + This is ASCII version of the format, Unicode strings are HEX encoded. Use GSM_Backup_GammuUCS2 instead if possible. + + + + enumerator GSM_Backup_GammuUCS2 + + Gammu own format can store almost anything from phone. + This is UCS2-BE version of the format. + + + + enumerator GSM_Backup_VNote + + vNote standard, can store phone notes. + + + diff --git a/tests/data/examples/test_c_enum/input.rst b/tests/data/examples/test_c_enum/input.rst new file mode 100644 index 00000000..ad90b8c1 --- /dev/null +++ b/tests/data/examples/test_c_enum/input.rst @@ -0,0 +1,11 @@ +.. 
doxygenenum:: GSM_BackupFormat +.. doxygenenumvalue:: GSM_Backup_Auto +.. doxygenenumvalue:: GSM_Backup_AutoUnicode +.. doxygenenumvalue:: GSM_Backup_LMB +.. doxygenenumvalue:: GSM_Backup_VCalendar +.. doxygenenumvalue:: GSM_Backup_VCard +.. doxygenenumvalue:: GSM_Backup_LDIF +.. doxygenenumvalue:: GSM_Backup_ICS +.. doxygenenumvalue:: GSM_Backup_Gammu +.. doxygenenumvalue:: GSM_Backup_GammuUCS2 +.. doxygenenumvalue:: GSM_Backup_VNote diff --git a/tests/data/examples/test_class/compare.xml b/tests/data/examples/test_class/compare.xml new file mode 100644 index 00000000..f8f52e3d --- /dev/null +++ b/tests/data/examples/test_class/compare.xml @@ -0,0 +1,254 @@ + + + + + Functions + + template<typename T>void f0 + + + + + template<>void f0<std::string> + + + + + + class OuterClass + + class outside of namespace + + class InnerClass + + inner class + + + + + + class ClassTest + + class outside of namespace + + Public Functions + + void functionint myParameter + + non-namespaced class function + More details in the header file. + More documentation in the impl file + + + + void anotherFunction + + non-namespaced class other function + More documentation in the impl file + + + + virtual void publicFunction const = 0 + + namespaced class function + + + + virtual void undocumentedPublicFunction const = 0 + + + + + + Protected Functions + + inline void protectedFunction + + A protected function. + + + + inline void undocumentedProtectedFunction + + + + + + Private Functions + + virtual void privateFunction const = 0 + + This is a private function. + + + + virtual void undocumentedPrivateFunction const = 0 + + + + + + class PrivateClass + + A private class. + + + + struct PrivateStruct + + A private struct. + + + + class ProtectedClass + + A protected class. + + + + struct ProtectedStruct + + A protected struct. + + + + class PublicClass + + A public class. + + + + struct PublicStruct + + A public struct. + + + + class UndocumentedPrivateClass + + + + + struct UndocumentedPrivateStruct + + + + + class UndocumentedProtectedClass + + + + + struct UndocumentedProtectedStruct + + + + + class UndocumentedPublicClass + + + + + struct UndocumentedPublicStruct + + + + + + + namespace TestNamespaceClasses + + + class ClassTest + + second class inside of namespace + + Public Functions + + inline void function + + second namespaced class function + + + + inline void anotherFunction + + second namespaced class other function + + + + + + + class NamespacedClassTest + + first class inside of namespace + + Public Functions + + virtual void function const = 0 + + namespaced class function + + + + inline explicit NamespacedClassTest + + + + + inline void anotherFunction + + namespaced class other function + + + + + Public Static Functions + + static void functionS + + + + + + + + + + namespace NS1 + + + Functions + + template<typename T>void f1 + + + + + template<>void f1<std::string> + + + + + + namespace NS2 + + + Functions + + template<typename T>void f2 + + + + + template<>void f2<std::string> + + + + + + + + + diff --git a/tests/data/examples/test_class/input.rst b/tests/data/examples/test_class/input.rst new file mode 100644 index 00000000..81fdc3e8 --- /dev/null +++ b/tests/data/examples/test_class/input.rst @@ -0,0 +1 @@ +.. 
doxygenfile:: class.h diff --git a/tests/data/examples/test_code_blocks/compare.xml b/tests/data/examples/test_code_blocks/compare.xml new file mode 100644 index 00000000..8cf47862 --- /dev/null +++ b/tests/data/examples/test_code_blocks/compare.xml @@ -0,0 +1,36 @@ + + + + + Functions + + void with_standard_code_block + + A function with an unannotated code block with C/C++ code. + char* buffer = new char[42]; +int charsAdded = sprintf(buffer, "Tabs are normally %d spaces\n", 8); + + + + void with_unannotated_cmake_code_block + + A function with an unannotated code block with non-C/C++ code. + set(user_list A B C) +foreach(element ${user_list}) + message(STATUS "Element is ${element}") +endforeach() + Another code-block that explicitly remains not highlighted. Show this as is. + + + + void with_annotated_cmake_code_block + + A function with an annotated cmake code block. + set(user_list A B C) +foreach(element ${user_list}) + message(STATUS "Element is ${element}") +endforeach() + + + + diff --git a/tests/data/examples/test_code_blocks/input.rst b/tests/data/examples/test_code_blocks/input.rst new file mode 100644 index 00000000..179d4e76 --- /dev/null +++ b/tests/data/examples/test_code_blocks/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: code_blocks.h diff --git a/tests/data/examples/test_cpp_concept/compare.xml b/tests/data/examples/test_cpp_concept/compare.xml new file mode 100644 index 00000000..60fdf8d4 --- /dev/null +++ b/tests/data/examples/test_cpp_concept/compare.xml @@ -0,0 +1,9 @@ + + + + + template<typename T>concept Hashable + + + + diff --git a/tests/data/examples/test_cpp_concept/input.rst b/tests/data/examples/test_cpp_concept/input.rst new file mode 100644 index 00000000..50c93490 --- /dev/null +++ b/tests/data/examples/test_cpp_concept/input.rst @@ -0,0 +1 @@ +.. doxygenconcept:: Hashable diff --git a/tests/data/examples/test_cpp_enum/compare.xml b/tests/data/examples/test_cpp_enum/compare.xml new file mode 100644 index 00000000..e8ce0c17 --- /dev/null +++ b/tests/data/examples/test_cpp_enum/compare.xml @@ -0,0 +1,48 @@ + + + + + enum Unscoped + + Values: + + enumerator UnscopedEnumerator + + + + + + + enum class ScopedStruct : int + + Values: + + enumerator Enumerator + + + + + + + enum class ScopedClass : int + + Values: + + enumerator Enumerator + + + + + + + enum class ScopedClassNoUnderlying + + Values: + + enumerator Enumerator + + + + + + diff --git a/tests/data/examples/test_cpp_enum/input.rst b/tests/data/examples/test_cpp_enum/input.rst new file mode 100644 index 00000000..d69124b8 --- /dev/null +++ b/tests/data/examples/test_cpp_enum/input.rst @@ -0,0 +1,4 @@ +.. doxygenenum:: Unscoped +.. doxygenenum:: ScopedStruct +.. doxygenenum:: ScopedClass +.. doxygenenum:: ScopedClassNoUnderlying diff --git a/tests/data/examples/test_define/compare.xml b/tests/data/examples/test_define/compare.xml new file mode 100644 index 00000000..f8f45ac1 --- /dev/null +++ b/tests/data/examples/test_define/compare.xml @@ -0,0 +1,49 @@ + + + + + USE_STUFF + + A simple define without a value. + + + + MAX_LENGTH + + A define with a simple value. + + + + MAXIMUMAB + + A define with some parameters. + + + Parameters + + + + A – The parameter A + + + B – The parameter B + + + + + + Returns + + The maximum of A and B + + + + + + + SWAPAB + + A define which spans multiple lines. 
+ + + diff --git a/tests/data/examples/test_define/input.rst b/tests/data/examples/test_define/input.rst new file mode 100644 index 00000000..d3b994c9 --- /dev/null +++ b/tests/data/examples/test_define/input.rst @@ -0,0 +1,4 @@ +.. doxygendefine:: USE_STUFF +.. doxygendefine:: MAX_LENGTH +.. doxygendefine:: MAXIMUM +.. doxygendefine:: SWAP diff --git a/tests/data/examples/test_dot_graphs/compare.xml b/tests/data/examples/test_dot_graphs/compare.xml new file mode 100644 index 00000000..1f32572b --- /dev/null +++ b/tests/data/examples/test_dot_graphs/compare.xml @@ -0,0 +1,17 @@ + + + + + page dotgraphs + +
+ Using @dot command +
basic graph elements
+
+
+ Using @dotfile command +
Captions go here
+
+
+
+
diff --git a/tests/data/examples/test_dot_graphs/input.rst b/tests/data/examples/test_dot_graphs/input.rst new file mode 100644 index 00000000..b2f78a22 --- /dev/null +++ b/tests/data/examples/test_dot_graphs/input.rst @@ -0,0 +1 @@ +.. doxygenpage:: dotgraphs diff --git a/tests/data/examples/test_headings/compare.xml b/tests/data/examples/test_headings/compare.xml new file mode 100644 index 00000000..61db6705 --- /dev/null +++ b/tests/data/examples/test_headings/compare.xml @@ -0,0 +1,15 @@ + + + + + class HeadingsTest + + This is a documentation. + This is more documentation. + Header + Text + Header Bold Header Text + Text + + + diff --git a/tests/data/examples/test_headings/input.rst b/tests/data/examples/test_headings/input.rst new file mode 100644 index 00000000..b42cb866 --- /dev/null +++ b/tests/data/examples/test_headings/input.rst @@ -0,0 +1 @@ +.. doxygenclass:: HeadingsTest diff --git a/tests/data/examples/test_rst/compare.xml b/tests/data/examples/test_rst/compare.xml new file mode 100644 index 00000000..36f93979 --- /dev/null +++ b/tests/data/examples/test_rst/compare.xml @@ -0,0 +1,76 @@ + + + + + class TestClass + + first class inside of namespace + + Public Functions + + virtual void function const = 0 + + Inserting additional reStructuredText information. + This is some funky non-XML compliant text: <& !>< + This is just a standard verbatim block with code: + child = 0; + while( child = parent->IterateChildren( child ) ) + + This reStructuredText has been handled correctly. + + + + + virtual void rawVerbatim const = 0 + + Inserting additional reStructuredText information. + + This reStructuredText has been handled correctly. + + + + + virtual void rawLeadingAsteriskVerbatim const = 0 + + Inserting additional reStructuredText information. + Some example code:int example(int x) { + return x * 2; +} + + + + virtual void rawLeadingSlashesVerbatimint something const = 0 + + Some kind of method. + bool foo(bool something) { + return something; +}; + + Documentation using /// should begin and end in a blank line. + + + + Parameters + + something – a parameter + + + + + + + virtual void rawInlineVerbatim const = 0 + + Inserting an inline reStructuredText snippet. Linking to another function: TestClass::rawVerbatim() + + + + inline virtual void testFunction const + + Brief description. + + + + + + diff --git a/tests/data/examples/test_rst/input.rst b/tests/data/examples/test_rst/input.rst new file mode 100644 index 00000000..b1088cef --- /dev/null +++ b/tests/data/examples/test_rst/input.rst @@ -0,0 +1,2 @@ +.. 
doxygenclass:: TestClass + :members: diff --git a/tests/data/examples/test_template_class_non_type/compare.xml b/tests/data/examples/test_template_class_non_type/compare.xml new file mode 100644 index 00000000..aa345e5c --- /dev/null +++ b/tests/data/examples/test_template_class_non_type/compare.xml @@ -0,0 +1,78 @@ + + + + + template<typename T, typename U, int N>class anothertemplateclass + + a class with three template parameters + + + Template Parameters + + + + T – this is the first template parameter + + + U – this is the second template parameter + + + N – this is the third template parameter, it is a non-type parameter + + + + + + + Public Functions + + inline anothertemplateclass + + default constructor + + + + inline anothertemplateclassT const &m1U const &m2 + + constructor with two template argument + + + Parameters + + + + m1 – first argument + + + m2 – second argument + + + + + + + + + U methodT const &t + + member accepting template argument and returning template argument + + + Parameters + + t – argument + + + + Returns + + returns value of type U + + + + + + + + + diff --git a/tests/data/examples/test_template_class_non_type/input.rst b/tests/data/examples/test_template_class_non_type/input.rst new file mode 100644 index 00000000..be6520f7 --- /dev/null +++ b/tests/data/examples/test_template_class_non_type/input.rst @@ -0,0 +1,2 @@ +.. doxygenclass:: anothertemplateclass + :members: diff --git a/tests/test_examples.py b/tests/test_examples.py new file mode 100644 index 00000000..26a72329 --- /dev/null +++ b/tests/test_examples.py @@ -0,0 +1,192 @@ +from xml.parsers import expat +import pytest +import pathlib +import subprocess +import shutil +import enum +import dataclasses + + +EXAMPLES_SOURCE_DIR = pathlib.Path(__file__).parent.parent / 'examples' / 'specific' + +DOXYFILE_TEMPLATE = """ +PROJECT_NAME = "example" +HAVE_DOT = YES +DOTFILE_DIRS = "{source_dir}" +GENERATE_LATEX = NO +GENERATE_MAN = NO +GENERATE_RTF = NO +CASE_SENSE_NAMES = NO +INPUT = {input} +OUTPUT_DIRECTORY = "{output}" +QUIET = YES +JAVADOC_AUTOBRIEF = YES +GENERATE_HTML = NO +GENERATE_XML = YES +ALIASES = "rst=\\verbatim embed:rst" +ALIASES += "endrst=\\endverbatim" +ALIASES += "inlinerst=\\verbatim embed:rst:inline" +""" + +C_FILE_SUFFIXES = frozenset(('.h', '.c', '.cpp')) +IGNORED_ELEMENTS = frozenset(('target', 'index')) + +BUFFER_SIZE = 0x1000 + + +class XMLEventType(enum.Enum): + E_START = enum.auto() + E_END = enum.auto() + E_TEXT = enum.auto() + +@dataclasses.dataclass +class XMLElement: + name: str + attr: dict[str,str] + line_no: int + column_no: int + +@dataclasses.dataclass +class XMLTextElement: + value: str + line_no: int + column_no: int + + +def xml_stream(infile): + """XML pull parser. 
+ + This is similar to xml.dom.pulldom.parse, except the locations of the + elements are tracked.""" + p = expat.ParserCreate() + + pending_events = [] + pending_text = '' + + def dispatch_text(): + nonlocal pending_text + + if pending_text: + pending_events.append(( + XMLEventType.E_TEXT, + XMLTextElement( + pending_text, + p.CurrentLineNumber, + p.CurrentColumnNumber))) + pending_text = '' + + def handle_start(name,attr): + dispatch_text() + + pending_events.append(( + XMLEventType.E_START, + XMLElement( + name, + attr, + p.CurrentLineNumber, + p.CurrentColumnNumber))) + p.StartElementHandler = handle_start + + def handle_end(_): + dispatch_text() + pending_events.append((XMLEventType.E_END,None)) + p.EndElementHandler = handle_end + + def handle_text(data): + nonlocal pending_text + pending_text += data + p.CharacterDataHandler = handle_text + + while True: + data = infile.read(BUFFER_SIZE) + if not data: + dispatch_text() + p.Parse(data,True) + yield from pending_events + break + p.Parse(data,False) + if pending_events: + yield from pending_events + pending_events.clear() + + +def get_individual_tests(): + return (pathlib.Path(__file__).parent / "data" / "examples").glob("test_*") + +def filter_c_files(name): + for p in EXAMPLES_SOURCE_DIR.glob(name + '.*'): + if p.suffix in C_FILE_SUFFIXES: + full = str(p) + if '"' in full: + raise ValueError('quotations marks not allowed in path names') + yield f'"{full}"' + +def filtered_xml(infile): + ignore = 0 + for event, node in xml_stream(infile): + if event == XMLEventType.E_START: + if ignore or node.name in IGNORED_ELEMENTS: + ignore += 1 + else: + yield event, node + elif event == XMLEventType.E_END: + if ignore: + ignore -= 1 + else: + yield event, node + else: + if not ignore: + text = node.value.strip() + if text: + node.value = text + yield event, node + +@pytest.mark.parametrize('test_input', get_individual_tests()) +def test_example(make_app, tmp_path, test_input): + doxygen = shutil.which('doxygen') + if doxygen is None: + raise ValueError('cannot find doxygen executable') + + doxyfile = tmp_path / "Doxyfile" + doxyfile.write_text(DOXYFILE_TEMPLATE.format( + input=" ".join(filter_c_files(test_input.stem.removeprefix('test_'))), + source_dir=EXAMPLES_SOURCE_DIR, + output=tmp_path + )) + (tmp_path / "conf.py").touch() + shutil.copyfile(test_input / "input.rst", tmp_path / "index.rst") + + subprocess.run([doxygen, doxyfile], check = True) + + make_app( + buildername='xml', + srcdir=tmp_path, + confoverrides={ + 'project': 'test', + 'breathe_projects': {'example': str(tmp_path / "xml")}, + 'breathe_default_project': 'example', + 'breathe_show_include': False, + 'extensions': ['breathe','sphinx.ext.graphviz']}).build() + + event_str = { + XMLEventType.E_START: 'element start', + XMLEventType.E_END: 'element end', + XMLEventType.E_TEXT: 'text' + } + + with open(tmp_path / '_build' / 'xml' / 'index.xml') as o_file, open(test_input / 'compare.xml') as c_file: + for o, c in zip(filtered_xml(o_file),filtered_xml(c_file)): + o_type, o_node = o + c_type, c_node = c + assert o_type == c_type, f'at line {o_node.line_no}: found {event_str[o_type]} when expecting {event_str[c_type]}' + + if o_type == XMLEventType.E_START: + assert o_node.name == c_node.name, f'wrong tag at line {o_node.line_no}: expected {c_node.name}, found {o_node.name}' + + # ignore extra attributes in o_node + for key, value in c_node.attr.items(): + assert key in o_node.attr, f'missing attribute at line {o_node.line_no}: {key}' + o_value = o_node.attr[key] + assert o_value == 
value, f'wrong value for attribute "{key}" at line {o_node.line_no}: expected "{value}", found "{o_value}"' + elif o_type == XMLEventType.E_TEXT: + assert o_node.value == c_node.value, f'wrong content at line {o_node.line_no}: expected "{c_node}", found "{o_node}"' diff --git a/tests/test_renderer.py b/tests/test_renderer.py index 20ba51b7..6b3846ab 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -212,7 +212,7 @@ def render( app.config.breathe_debug_trace_directives = False app.config.breathe_debug_trace_doxygen_ids = False app.config.breathe_debug_trace_qualification = False - renderer = SphinxRenderer( + r = SphinxRenderer( app, None, # project_info [], # node_stack @@ -222,8 +222,8 @@ def render( compound_parser, (lambda nstack: True), ) - renderer.context = MockContext(app, [member_def], domain, options) - return renderer.render(member_def) + r.context = MockContext(app, [renderer.TaggedNode(None,member_def)], domain, options) + return r.render(member_def) def test_render_func(app): diff --git a/xml_parser_generator/schema.json b/xml_parser_generator/schema.json index 06a928f4..91f66417 100644 --- a/xml_parser_generator/schema.json +++ b/xml_parser_generator/schema.json @@ -350,10 +350,10 @@ "codelineType": { "kind": "tag_only_element", "attributes": { - "external": {"type": "#DoxBool"}, - "lineno": {"type": "#integer"}, - "refid": {"type": "#string"}, - "refkind": {"type": "DoxRefKind"} + "external": {"type": "#DoxBool", "optional": true}, + "lineno": {"type": "#integer", "optional": true}, + "refid": {"type": "#string", "optional": true}, + "refkind": {"type": "DoxRefKind", "optional": true} }, "children": { "highlight": {"type": "highlightType", "is_list": true, "min_items": 0} @@ -471,13 +471,13 @@ "kind": "tag_only_element", "any_attr": true, "attributes": { - "align": {"type": "DoxAlign"}, - "class": {"type": "#string", "py_name": "class_"}, - "colspan": {"type": "#integer"}, - "rowspan": {"type": "#integer"}, + "align": {"type": "DoxAlign", "optional": true}, + "class": {"type": "#string", "py_name": "class_", "optional": true}, + "colspan": {"type": "#integer", "optional": true}, + "rowspan": {"type": "#integer", "optional": true}, "thead": {"type": "#DoxBool"}, - "valign": {"type": "DoxVerticalAlign"}, - "width": {"type": "#string"} + "valign": {"type": "DoxVerticalAlign", "optional": true}, + "width": {"type": "#string", "optional": true} }, "children": { "para": {"type": "docParaType", "is_list": true, "min_items": 0} @@ -1053,8 +1053,8 @@ "kind": "list_element", "min_items": 1, "attributes": { - "type": {"type": "DoxOlType"}, - "start": {"type": "#integer"} + "type": {"type": "DoxOlType", "optional": true}, + "start": {"type": "#integer", "optional": true} }, "content": { "listitem": "docListItemType" @@ -1063,7 +1063,7 @@ "docSimpleSectType": { "kind": "tag_only_element", "attributes": { - "type": {"type": "DoxSimpleSectKind"} + "kind": {"type": "DoxSimpleSectKind"} }, "children": { "title": {"type": "docTitleType", "min_items": 0}, @@ -1083,7 +1083,7 @@ "attributes": { "rows": {"type": "#integer"}, "cols": {"type": "#integer"}, - "width": {"type": "#string"} + "width": {"type": "#string", "optional": true} }, "children": { "caption": {"type": "docCaptionType", "min_items": 0}, From 997e1a873b233f6108c0cc3ab121428f62f8399e Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Tue, 5 Dec 2023 02:49:24 -0500 Subject: [PATCH 25/65] More tests and another fix --- breathe/renderer/sphinxrenderer.py | 15 +++--- .../examples/test_cpp_friendclass/compare.xml 
| 28 +++++++++++ .../examples/test_cpp_friendclass/input.rst | 6 +++ .../test_cpp_inherited_members/compare.xml | 50 +++++++++++++++++++ .../extra_dox_opts.txt | 1 + .../test_cpp_inherited_members/input.rst | 1 + tests/test_examples.py | 8 ++- 7 files changed, 100 insertions(+), 9 deletions(-) create mode 100644 tests/data/examples/test_cpp_friendclass/compare.xml create mode 100644 tests/data/examples/test_cpp_friendclass/input.rst create mode 100644 tests/data/examples/test_cpp_inherited_members/compare.xml create mode 100644 tests/data/examples/test_cpp_inherited_members/extra_dox_opts.txt create mode 100644 tests/data/examples/test_cpp_inherited_members/input.rst diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index cd4d084e..47da49ca 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -893,9 +893,10 @@ def get_fully_qualified_name(self): if isinstance(node.value,parser.Node_CompoundType) and node.value.kind == parser.CompoundKind.namespace: names.append(node.value.name) - for node in node_stack: - if isinstance(node.value,parser.Node_refType) and len(names) == 0: - return ''.join(node.value) + for tval in node_stack: + node = tval.value + if isinstance(node,parser.Node_refType) and len(names) == 0: + return ''.join(node) if ( isinstance(node,parser.Node_CompoundType) and node.kind not in [parser.CompoundKind.file, parser.CompoundKind.namespace, parser.CompoundKind.group] ) or isinstance(node,parser.Node_memberdefType): @@ -994,7 +995,7 @@ def detaileddescription(self, node) -> list[Node]: admonitions: list[Node] = [] def pullup(node, typ, dest): - for n in node.findall(typ): + for n in list(node.findall(typ)): del n.parent[n.parent.index(n)] dest.append(n) @@ -2519,13 +2520,13 @@ def visit_templateparamlist(self, node: parser.Node_templateparamlistType) -> li def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: """Parameter/Exception/TemplateParameter documentation""" - has_retval = sphinx.version_info[0:2] < (4, 3) # pyright: ignore + # retval support available on Sphinx >= 4.3 + has_retval = sphinx.version_info[0:2] >= (4, 3) # pyright: ignore fieldListName = { parser.DoxParamListKind.param: "param", parser.DoxParamListKind.exception: "throws", parser.DoxParamListKind.templateparam: "tparam", - # retval support available on Sphinx >= 4.3 - parser.DoxParamListKind.retval: "returns" if has_retval else "retval", + parser.DoxParamListKind.retval: "retval" if has_retval else "returns", } # https://docutils.sourceforge.io/docs/ref/doctree.html#field-list diff --git a/tests/data/examples/test_cpp_friendclass/compare.xml b/tests/data/examples/test_cpp_friendclass/compare.xml new file mode 100644 index 00000000..ad7e269a --- /dev/null +++ b/tests/data/examples/test_cpp_friendclass/compare.xml @@ -0,0 +1,28 @@ + + + + + struct A + + + + + struct B + + + + + struct C + + + Friends + + friend class A + + + friend struct B + + + + + diff --git a/tests/data/examples/test_cpp_friendclass/input.rst b/tests/data/examples/test_cpp_friendclass/input.rst new file mode 100644 index 00000000..5a96b09d --- /dev/null +++ b/tests/data/examples/test_cpp_friendclass/input.rst @@ -0,0 +1,6 @@ +.. doxygenstruct:: A +.. doxygenstruct:: B + +.. 
doxygenstruct:: C + :members: + :undoc-members: diff --git a/tests/data/examples/test_cpp_inherited_members/compare.xml b/tests/data/examples/test_cpp_inherited_members/compare.xml new file mode 100644 index 00000000..43d6f961 --- /dev/null +++ b/tests/data/examples/test_cpp_inherited_members/compare.xml @@ -0,0 +1,50 @@ + + + + + class Base + + Base class. + Subclassed by A, B + + Public Functions + + void f_issue_356 + + Base-class member function. + + + + + + + class A : public Base + + Class A. + + Public Functions + + void f_issue_356 + + Base-class member function. + + + + + + + class B : public Base + + Class B. + + Public Functions + + void f_issue_356 + + Base-class member function. + + + + + + diff --git a/tests/data/examples/test_cpp_inherited_members/extra_dox_opts.txt b/tests/data/examples/test_cpp_inherited_members/extra_dox_opts.txt new file mode 100644 index 00000000..075cb08d --- /dev/null +++ b/tests/data/examples/test_cpp_inherited_members/extra_dox_opts.txt @@ -0,0 +1 @@ +INLINE_INHERITED_MEMB = YES diff --git a/tests/data/examples/test_cpp_inherited_members/input.rst b/tests/data/examples/test_cpp_inherited_members/input.rst new file mode 100644 index 00000000..3e99c9e2 --- /dev/null +++ b/tests/data/examples/test_cpp_inherited_members/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: cpp_inherited_members.h diff --git a/tests/test_examples.py b/tests/test_examples.py index 26a72329..2ba061bc 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -148,11 +148,15 @@ def test_example(make_app, tmp_path, test_input): raise ValueError('cannot find doxygen executable') doxyfile = tmp_path / "Doxyfile" - doxyfile.write_text(DOXYFILE_TEMPLATE.format( + doxycontent = DOXYFILE_TEMPLATE.format( input=" ".join(filter_c_files(test_input.stem.removeprefix('test_'))), source_dir=EXAMPLES_SOURCE_DIR, output=tmp_path - )) + ) + extra_opts = test_input / 'extra_dox_opts.txt' + if extra_opts.exists(): + doxycontent += extra_opts.read_text() + doxyfile.write_text(doxycontent) (tmp_path / "conf.py").touch() shutil.copyfile(test_input / "input.rst", tmp_path / "index.rst") From 7163482c345108f2be815c9a4db39ebf0822f447 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 6 Dec 2023 12:40:17 -0500 Subject: [PATCH 26/65] more tests, more fixes --- breathe/parser.py | 6 + breathe/renderer/filter.py | 6 +- breathe/renderer/sphinxrenderer.py | 17 ++- examples/specific/param_dirs.h | 6 + tests/data/classSample.xml | 51 +++---- tests/data/examples/test_alias/compare.xml | 23 ++++ .../examples/test_alias/extra_dox_opts.txt | 1 + tests/data/examples/test_alias/input.rst | 1 + tests/data/examples/test_array/compare.xml | 6 +- tests/data/examples/test_c_enum/compare.xml | 63 ++++++--- tests/data/examples/test_c_file/compare.xml | 130 ++++++++++++++++++ tests/data/examples/test_c_file/input.rst | 1 + tests/data/examples/test_class/compare.xml | 120 ++++++++++------ .../examples/test_code_blocks/compare.xml | 9 +- .../examples/test_cpp_concept/compare.xml | 3 +- tests/data/examples/test_cpp_enum/compare.xml | 24 ++-- .../examples/test_cpp_friendclass/compare.xml | 9 +- .../test_cpp_inherited_members/compare.xml | 18 ++- .../test_cpp_trailing_return_type/compare.xml | 21 +++ .../test_cpp_trailing_return_type/input.rst | 1 + tests/data/examples/test_define/compare.xml | 12 +- .../data/examples/test_dot_graphs/compare.xml | 4 +- tests/data/examples/test_group/compare.xml | 73 ++++++++++ tests/data/examples/test_group/input.rst | 4 + tests/data/examples/test_headings/compare.xml | 3 +- 
 .../data/examples/test_param_dirs/compare.xml | 28 ++++
 tests/data/examples/test_param_dirs/input.rst | 1 +
 tests/data/examples/test_rst/compare.xml | 21 ++-
 .../test_template_class_non_type/compare.xml | 12 +-
 tests/test_examples.py | 2 +-
 tests/test_filters.py | 62 +++------
 31 files changed, 552 insertions(+), 186 deletions(-)
 create mode 100644 examples/specific/param_dirs.h
 create mode 100644 tests/data/examples/test_alias/compare.xml
 create mode 100644 tests/data/examples/test_alias/extra_dox_opts.txt
 create mode 100644 tests/data/examples/test_alias/input.rst
 create mode 100644 tests/data/examples/test_c_file/compare.xml
 create mode 100644 tests/data/examples/test_c_file/input.rst
 create mode 100644 tests/data/examples/test_cpp_trailing_return_type/compare.xml
 create mode 100644 tests/data/examples/test_cpp_trailing_return_type/input.rst
 create mode 100644 tests/data/examples/test_group/compare.xml
 create mode 100644 tests/data/examples/test_group/input.rst
 create mode 100644 tests/data/examples/test_param_dirs/compare.xml
 create mode 100644 tests/data/examples/test_param_dirs/input.rst

diff --git a/breathe/parser.py b/breathe/parser.py
index 95db3e9d..4551d0fe 100644
--- a/breathe/parser.py
+++ b/breathe/parser.py
@@ -23,6 +23,12 @@ def node_repr(self: Node) -> str:
         return f'{cls.__name__}({fields})'
 Node.__repr__ = node_repr # type: ignore
 
+def description_has_content(node: Node_descriptionType | None) -> bool:
+    if node is None: return False
+    if bool(node.title) or len(node) > 1: return True
+    if not len(node): return False
+    item = node[0]
+    return not isinstance(item,str) or (len(item) > 0 and not item.isspace())
 
 class ParserError(RuntimeError):
     def __init__(self, message: str, filename: str, lineno: int | None = None):
diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py
index 3114ebbe..12c8b354 100644
--- a/breathe/renderer/filter.py
+++ b/breathe/renderer/filter.py
@@ -293,9 +293,9 @@ def _create_public_members_filter(
         # If the user has specified the 'members' option with arguments then
         # we only pay attention to that and not to any other member settings
         members_str = options["members"]
-        if members_str and members_str.strip():
+        if members_str and not members_str.isspace():
             # Matches sphinx-autodoc behaviour of comma separated values
-            members = set([x.strip() for x in members_str.split(",")])
+            members = frozenset([x.strip() for x in members_str.split(",")])
 
             # Accept any nodes which don't have a "sectiondef" as a parent
             # or, if they do, only accept them if their names are in the
@@ -326,7 +326,7 @@ def filter(nstack: NodeStack) -> bool:
             # Allow anything that isn't a Node_memberdefType, or if it is only
             # allow the ones with a description
             return (not isinstance(node, parser.Node_memberdefType)) or bool(
-                node.briefdescription or node.detaileddescription
+                parser.description_has_content(node.briefdescription) or parser.description_has_content(node.detaileddescription)
             )
 
         return filter
diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py
index 47da49ca..5f98e389 100644
--- a/breathe/renderer/sphinxrenderer.py
+++ b/breathe/renderer/sphinxrenderer.py
@@ -2541,11 +2541,8 @@ def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]:
             if len(paramNameNodes) != 0:
                 nameNodes = []
                 for paramName in paramNameNodes:
-                    # this is really a list of MixedContainer objects, i.e., a generic object
-                    # we assume there is either 1 or 2 elements, if there is 2 the first is the
-                    # parameter direction
-                    assert len(paramName) == 
1 or len(paramName) == 2, list(paramName) - thisName = self.render_tagged(paramName[-1]) + assert len(paramName) == 1 + thisName = self.render_tagged(paramName[0]) if len(nameNodes) != 0: if node.kind == parser.DoxParamListKind.exception: msg = "Doxygen \\exception commands with multiple names can not be" @@ -2557,12 +2554,14 @@ def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: continue nameNodes.append(nodes.Text(", ")) nameNodes.extend(thisName) - if len(paramName) == 2: + if paramName.direction is not None: # note, each paramName node seems to have the same direction, # so just use the last one - dir = "".join(n.astext() for n in self.render_tagged(paramName[0])).strip() - assert dir in ("[in]", "[out]", "[inout]"), ">" + dir + "<" - parameterDirectionNodes = [nodes.strong(dir, dir), nodes.Text(" ", " ")] + dir = { + parser.DoxParamDir.in_: "[in]", + parser.DoxParamDir.out: "[out]", + parser.DoxParamDir.inout: "[inout]"}[paramName.direction] + parameterDirectionNodes = [nodes.strong(dir, dir), nodes.Text(" ")] # it seems that Sphinx expects the name to be a single node, # so let's make it that txt = fieldListName[node.kind] + " " diff --git a/examples/specific/param_dirs.h b/examples/specific/param_dirs.h new file mode 100644 index 00000000..a47a8aeb --- /dev/null +++ b/examples/specific/param_dirs.h @@ -0,0 +1,6 @@ +/** + * @param[in] i Input + * @param[out] o Output + * @param[in,out] io Input and output + */ +int process(void *i, void *o, void *io); diff --git a/tests/data/classSample.xml b/tests/data/classSample.xml index 47befad9..04860d6a 100644 --- a/tests/data/classSample.xml +++ b/tests/data/classSample.xml @@ -1,4 +1,6 @@ + Sample @@ -10,12 +12,9 @@ public_field Sample::public_field - - - - - - + Something + + @@ -26,12 +25,9 @@ protected_field Sample::protected_field - - - - - - + + + @@ -42,12 +38,10 @@ private_field Sample::private_field - - + Something - - + @@ -62,12 +56,9 @@ int x - - - - - - + + + @@ -82,12 +73,9 @@ int x - - - - - - + Something + + @@ -103,11 +91,10 @@ x + - - - - + + diff --git a/tests/data/examples/test_alias/compare.xml b/tests/data/examples/test_alias/compare.xml new file mode 100644 index 00000000..a65a1303 --- /dev/null +++ b/tests/data/examples/test_alias/compare.xml @@ -0,0 +1,23 @@ + + + + + Functions + + + void frob_foosvoid *Frobs + + Foo frob routine. + bob this something elseSide EffectsFrobs any foos.bob this something elseSide EffectsFrobs any foos. + + + Parameters + + Frobs[out] any foos. + + + + + + + diff --git a/tests/data/examples/test_alias/extra_dox_opts.txt b/tests/data/examples/test_alias/extra_dox_opts.txt new file mode 100644 index 00000000..54e59db8 --- /dev/null +++ b/tests/data/examples/test_alias/extra_dox_opts.txt @@ -0,0 +1 @@ +ALIASES = "sideeffect=\par Side Effects^^" diff --git a/tests/data/examples/test_alias/input.rst b/tests/data/examples/test_alias/input.rst new file mode 100644 index 00000000..5e2c4fbf --- /dev/null +++ b/tests/data/examples/test_alias/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: alias.h diff --git a/tests/data/examples/test_array/compare.xml b/tests/data/examples/test_array/compare.xml index ab1001bb..a5775c5d 100644 --- a/tests/data/examples/test_array/compare.xml +++ b/tests/data/examples/test_array/compare.xml @@ -1,14 +1,16 @@ + - int fooint a[5] + int fooint a[5] My function. + - int barint nint a[] + int barint nint a[] My other function. 
Test:This declaration is supposed to be int bar(int n, int a[static n]); But, Sphinx fails to recognize int a[static n]) as a C specific array syntax diff --git a/tests/data/examples/test_c_enum/compare.xml b/tests/data/examples/test_c_enum/compare.xml index 99981e89..2a338d57 100644 --- a/tests/data/examples/test_c_enum/compare.xml +++ b/tests/data/examples/test_c_enum/compare.xml @@ -1,137 +1,158 @@ + - enum GSM_BackupFormat + enum GSM_BackupFormat Backup data. Values: + - enumerator GSM_Backup_Auto + enumerator GSM_Backup_Auto Compatibility with old gboolean used instead of format. File type is guessed for extension, non unicode format used for Gammu backup. + - enumerator GSM_Backup_AutoUnicode + enumerator GSM_Backup_AutoUnicode Compatibility with old gboolean used instead of format. File type is guessed for extension, unicode format used for Gammu backup. + - enumerator GSM_Backup_LMB + enumerator GSM_Backup_LMB LMB format, compatible with Logo manager, can store phonebooks and logos. + - enumerator GSM_Backup_VCalendar + enumerator GSM_Backup_VCalendar vCalendar standard, can store todo and calendar entries. + - enumerator GSM_Backup_VCard + enumerator GSM_Backup_VCard vCard standard, can store phone phonebook entries. + - enumerator GSM_Backup_LDIF + enumerator GSM_Backup_LDIF LDIF (LDAP Data Interchange Format), can store phone phonebook entries. + - enumerator GSM_Backup_ICS + enumerator GSM_Backup_ICS iCalendar standard, can store todo and calendar entries. + - enumerator GSM_Backup_Gammu + enumerator GSM_Backup_Gammu Gammu own format can store almost anything from phone. This is ASCII version of the format, Unicode strings are HEX encoded. Use GSM_Backup_GammuUCS2 instead if possible. + - enumerator GSM_Backup_GammuUCS2 + enumerator GSM_Backup_GammuUCS2 Gammu own format can store almost anything from phone. This is UCS2-BE version of the format. + - enumerator GSM_Backup_VNote + enumerator GSM_Backup_VNote vNote standard, can store phone notes. + - enumerator GSM_Backup_Auto + enumerator GSM_Backup_Auto Compatibility with old gboolean used instead of format. File type is guessed for extension, non unicode format used for Gammu backup. + - enumerator GSM_Backup_AutoUnicode + enumerator GSM_Backup_AutoUnicode Compatibility with old gboolean used instead of format. File type is guessed for extension, unicode format used for Gammu backup. + - enumerator GSM_Backup_LMB + enumerator GSM_Backup_LMB LMB format, compatible with Logo manager, can store phonebooks and logos. + - enumerator GSM_Backup_VCalendar + enumerator GSM_Backup_VCalendar vCalendar standard, can store todo and calendar entries. + - enumerator GSM_Backup_VCard + enumerator GSM_Backup_VCard vCard standard, can store phone phonebook entries. + - enumerator GSM_Backup_LDIF + enumerator GSM_Backup_LDIF LDIF (LDAP Data Interchange Format), can store phone phonebook entries. + - enumerator GSM_Backup_ICS + enumerator GSM_Backup_ICS iCalendar standard, can store todo and calendar entries. + - enumerator GSM_Backup_Gammu + enumerator GSM_Backup_Gammu Gammu own format can store almost anything from phone. This is ASCII version of the format, Unicode strings are HEX encoded. Use GSM_Backup_GammuUCS2 instead if possible. + - enumerator GSM_Backup_GammuUCS2 + enumerator GSM_Backup_GammuUCS2 Gammu own format can store almost anything from phone. This is UCS2-BE version of the format. + - enumerator GSM_Backup_VNote + enumerator GSM_Backup_VNote vNote standard, can store phone notes. 
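The undoc-members behaviour in the breathe/renderer/filter.py hunk above depends on the description_has_content helper added to breathe/parser.py in the same patch. As a rough, self-contained sketch of that check (FakeDescription is a hypothetical stand-in for parser.Node_descriptionType and is not part of the patch):

class FakeDescription(list):
    # Mimics just enough of Node_descriptionType: a sequence of children
    # (strings or child nodes) plus an optional title attribute.
    def __init__(self, children=(), title=None):
        super().__init__(children)
        self.title = title

def description_has_content(node):
    # Same logic as the helper in breathe/parser.py: a description counts as
    # real documentation only if it has a title, more than one child, or a
    # single child that is a non-string node or non-whitespace text.
    if node is None:
        return False
    if bool(node.title) or len(node) > 1:
        return True
    if not len(node):
        return False
    item = node[0]
    return not isinstance(item, str) or (len(item) > 0 and not item.isspace())

assert not description_has_content(None)
assert not description_has_content(FakeDescription(["   \n"]))  # whitespace only
assert description_has_content(FakeDescription(["Something"]))  # real text
assert description_has_content(FakeDescription([], title="T"))  # a title counts

This is why members whose briefdescription and detaileddescription contain only whitespace are now treated as undocumented by the undoc-members filter.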
diff --git a/tests/data/examples/test_c_file/compare.xml b/tests/data/examples/test_c_file/compare.xml new file mode 100644 index 00000000..ac1925ac --- /dev/null +++ b/tests/data/examples/test_c_file/compare.xml @@ -0,0 +1,130 @@ + + + + + Defines + + + WRITE_TREE_MISSING_OK + + bitmasks to write_cache_as_tree flags + + + + + WRITE_TREE_IGNORE_CACHE_TREE + + + + + + WRITE_TREE_UNREADABLE_INDEX + + error return codes + + + + + WRITE_TREE_UNMERGED_INDEX + + + + + + WRITE_TREE_PREFIX_ERROR + + + + + + Functions + + + struct cache_tree *cache_treevoid + + + + + + void cache_tree_freestruct cache_tree** + + + + + + void cache_tree_invalidate_pathstruct cache_tree*const char* + + + + + + struct cache_tree_sub *cache_tree_substruct cache_tree*const char* + + + + + + void cache_tree_writestruct strbuf*struct cache_tree *root + + + + + + struct cache_tree *cache_tree_readconst char *bufferunsigned long size + + + + + + int cache_tree_fully_validstruct cache_tree* + + + + + + int cache_tree_updatestruct cache_tree*struct cache_entry**intintint + + + + + + int write_cache_as_treeunsigned char *sha1int flagsconst char *prefix + + + + + + void prime_cache_treestruct cache_tree**struct tree* + + + + + + int cache_tree_matches_traversalstruct cache_tree*struct name_entry *entstruct traverse_info *info + + + + + + Variables + + + struct cache_tree global_cache_tree + + Shared cache tree instance. + + + + + + struct cache_tree_sub + + + + + + struct cache_tree + + + + diff --git a/tests/data/examples/test_c_file/input.rst b/tests/data/examples/test_c_file/input.rst new file mode 100644 index 00000000..2191a142 --- /dev/null +++ b/tests/data/examples/test_c_file/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: c_file.h diff --git a/tests/data/examples/test_class/compare.xml b/tests/data/examples/test_class/compare.xml index f8f52e3d..ed7f920d 100644 --- a/tests/data/examples/test_class/compare.xml +++ b/tests/data/examples/test_class/compare.xml @@ -3,175 +3,204 @@ Functions + - template<typename T>void f0 + template<typename T>void f0 + - template<>void f0<std::string> + template<>void f0<std::string> + - class OuterClass + class OuterClass class outside of namespace + - class InnerClass + class InnerClass inner class + - class ClassTest + class ClassTest class outside of namespace Public Functions + - void functionint myParameter + void functionint myParameter non-namespaced class function More details in the header file. More documentation in the impl file + - void anotherFunction + void anotherFunction non-namespaced class other function More documentation in the impl file + - virtual void publicFunction const = 0 + virtual void publicFunction const = 0 namespaced class function + - virtual void undocumentedPublicFunction const = 0 + virtual void undocumentedPublicFunction const = 0 Protected Functions + - inline void protectedFunction + inline void protectedFunction A protected function. + - inline void undocumentedProtectedFunction + inline void undocumentedProtectedFunction Private Functions + - virtual void privateFunction const = 0 + virtual void privateFunction const = 0 This is a private function. + - virtual void undocumentedPrivateFunction const = 0 + virtual void undocumentedPrivateFunction const = 0 + - class PrivateClass + class PrivateClass A private class. + - struct PrivateStruct + struct PrivateStruct A private struct. + - class ProtectedClass + class ProtectedClass A protected class. + - struct ProtectedStruct + struct ProtectedStruct A protected struct. 
+ - class PublicClass + class PublicClass A public class. + - struct PublicStruct + struct PublicStruct A public struct. + - class UndocumentedPrivateClass + class UndocumentedPrivateClass + - struct UndocumentedPrivateStruct + struct UndocumentedPrivateStruct + - class UndocumentedProtectedClass + class UndocumentedProtectedClass + - struct UndocumentedProtectedStruct + struct UndocumentedProtectedStruct + - class UndocumentedPublicClass + class UndocumentedPublicClass + - struct UndocumentedPublicStruct + struct UndocumentedPublicStruct + - namespace TestNamespaceClasses + namespace TestNamespaceClasses + - class ClassTest + class ClassTest second class inside of namespace Public Functions + - inline void function + inline void function second namespaced class function + - inline void anotherFunction + inline void anotherFunction second namespaced class other function @@ -179,25 +208,29 @@ + - class NamespacedClassTest + class NamespacedClassTest first class inside of namespace Public Functions + - virtual void function const = 0 + virtual void function const = 0 namespaced class function + - inline explicit NamespacedClassTest + inline explicit NamespacedClassTest + - inline void anotherFunction + inline void anotherFunction namespaced class other function @@ -205,8 +238,9 @@ Public Static Functions + - static void functionS + static void functionS @@ -215,34 +249,40 @@ + - namespace NS1 + namespace NS1 Functions + - template<typename T>void f1 + template<typename T>void f1 + - template<>void f1<std::string> + template<>void f1<std::string> + - namespace NS2 + namespace NS2 Functions + - template<typename T>void f2 + template<typename T>void f2 + - template<>void f2<std::string> + template<>void f2<std::string> diff --git a/tests/data/examples/test_code_blocks/compare.xml b/tests/data/examples/test_code_blocks/compare.xml index 8cf47862..4a7eac79 100644 --- a/tests/data/examples/test_code_blocks/compare.xml +++ b/tests/data/examples/test_code_blocks/compare.xml @@ -3,16 +3,18 @@ Functions + - void with_standard_code_block + void with_standard_code_block A function with an unannotated code block with C/C++ code. char* buffer = new char[42]; int charsAdded = sprintf(buffer, "Tabs are normally %d spaces\n", 8); + - void with_unannotated_cmake_code_block + void with_unannotated_cmake_code_block A function with an unannotated code block with non-C/C++ code. set(user_list A B C) @@ -22,8 +24,9 @@ endforeach() Another code-block that explicitly remains not highlighted. Show this as is. + - void with_annotated_cmake_code_block + void with_annotated_cmake_code_block A function with an annotated cmake code block. 
set(user_list A B C) diff --git a/tests/data/examples/test_cpp_concept/compare.xml b/tests/data/examples/test_cpp_concept/compare.xml index 60fdf8d4..74fb257e 100644 --- a/tests/data/examples/test_cpp_concept/compare.xml +++ b/tests/data/examples/test_cpp_concept/compare.xml @@ -1,8 +1,9 @@ + - template<typename T>concept Hashable + template<typename T>concept Hashable diff --git a/tests/data/examples/test_cpp_enum/compare.xml b/tests/data/examples/test_cpp_enum/compare.xml index e8ce0c17..5b81c11a 100644 --- a/tests/data/examples/test_cpp_enum/compare.xml +++ b/tests/data/examples/test_cpp_enum/compare.xml @@ -1,45 +1,53 @@ + - enum Unscoped + enum Unscoped Values: + - enumerator UnscopedEnumerator + enumerator UnscopedEnumerator + - enum class ScopedStruct : int + enum class ScopedStruct : int Values: + - enumerator Enumerator + enumerator Enumerator + - enum class ScopedClass : int + enum class ScopedClass : int Values: + - enumerator Enumerator + enumerator Enumerator + - enum class ScopedClassNoUnderlying + enum class ScopedClassNoUnderlying Values: + - enumerator Enumerator + enumerator Enumerator diff --git a/tests/data/examples/test_cpp_friendclass/compare.xml b/tests/data/examples/test_cpp_friendclass/compare.xml index ad7e269a..eabbdf48 100644 --- a/tests/data/examples/test_cpp_friendclass/compare.xml +++ b/tests/data/examples/test_cpp_friendclass/compare.xml @@ -1,18 +1,21 @@ + - struct A + struct A + - struct B + struct B + - struct C + struct C Friends diff --git a/tests/data/examples/test_cpp_inherited_members/compare.xml b/tests/data/examples/test_cpp_inherited_members/compare.xml index 43d6f961..86ade12e 100644 --- a/tests/data/examples/test_cpp_inherited_members/compare.xml +++ b/tests/data/examples/test_cpp_inherited_members/compare.xml @@ -1,15 +1,17 @@ + - class Base + class Base Base class. Subclassed by A, B Public Functions + - void f_issue_356 + void f_issue_356 Base-class member function. @@ -17,14 +19,16 @@ + - class A : public Base + class A : public Base Class A. Public Functions + - void f_issue_356 + void f_issue_356 Base-class member function. @@ -32,14 +36,16 @@ + - class B : public Base + class B : public Base Class B. Public Functions + - void f_issue_356 + void f_issue_356 Base-class member function. diff --git a/tests/data/examples/test_cpp_trailing_return_type/compare.xml b/tests/data/examples/test_cpp_trailing_return_type/compare.xml new file mode 100644 index 00000000..df22a0ca --- /dev/null +++ b/tests/data/examples/test_cpp_trailing_return_type/compare.xml @@ -0,0 +1,21 @@ + + + + + Functions + + + auto f_issue_441 -> Thingy* + + Function that creates a thingy. + + + + + + class Thingy + + needed for references in global function return type + + + diff --git a/tests/data/examples/test_cpp_trailing_return_type/input.rst b/tests/data/examples/test_cpp_trailing_return_type/input.rst new file mode 100644 index 00000000..749bd314 --- /dev/null +++ b/tests/data/examples/test_cpp_trailing_return_type/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: cpp_trailing_return_type.h diff --git a/tests/data/examples/test_define/compare.xml b/tests/data/examples/test_define/compare.xml index f8f45ac1..5b4283eb 100644 --- a/tests/data/examples/test_define/compare.xml +++ b/tests/data/examples/test_define/compare.xml @@ -1,20 +1,23 @@ + - USE_STUFF + USE_STUFF A simple define without a value. + - MAX_LENGTH + MAX_LENGTH A define with a simple value. + - MAXIMUMAB + MAXIMUMAB A define with some parameters. 
@@ -40,8 +43,9 @@ + - SWAPAB + SWAPAB A define which spans multiple lines. diff --git a/tests/data/examples/test_dot_graphs/compare.xml b/tests/data/examples/test_dot_graphs/compare.xml index 1f32572b..ebea4a27 100644 --- a/tests/data/examples/test_dot_graphs/compare.xml +++ b/tests/data/examples/test_dot_graphs/compare.xml @@ -2,14 +2,16 @@ - page dotgraphs + page dotgraphs
Using @dot command +
basic graph elements
Using @dotfile command +
Captions go here
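Each new directory under tests/data/examples/ added in this series pairs an input.rst (the Breathe directives to render) with a compare.xml (the expected docutils output), plus an optional extra_dox_opts.txt carrying extra Doxyfile settings such as the ALIASES line used by test_alias. A hedged outline of that fixture layout, assuming it is run from the repository root; this is only a sketch of the directory contents, not the actual tests/test_examples.py harness:

from pathlib import Path

def example_cases(root=Path("tests/data/examples")):
    # Each test_<name>/ directory is one rendering case: input.rst is rendered
    # through Sphinx with Breathe and the result is compared against compare.xml.
    for case in sorted(root.glob("test_*")):
        yield {
            "name": case.name,
            "directives": case / "input.rst",
            "expected": case / "compare.xml",
            # Only some cases (e.g. test_alias) ship extra Doxyfile options.
            "extra_dox_opts": case / "extra_dox_opts.txt",
        }

for case in example_cases():
    assert case["directives"].exists() and case["expected"].exists()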
diff --git a/tests/data/examples/test_group/compare.xml b/tests/data/examples/test_group/compare.xml new file mode 100644 index 00000000..9f98429a --- /dev/null +++ b/tests/data/examples/test_group/compare.xml @@ -0,0 +1,73 @@ + + + + + group mygroup + + This is the first group. + + Functions + + + void groupedFunction + + This function is in MyGroup. + + + + + + class GroupedClassTest + + first class inside of namespace + + Public Functions + + + virtual void publicFunction const = 0 + + namespaced class function + + + + + + class PublicClass + + A protected class. + + + + + class UndocumentedPublicClass + + + + + + + + + group innergroup + + This is an inner group. + + + class InnerGroupClassTest + + inner class inside of namespace + + Public Functions + + + inline void function + + inner namespaced class function + + + + + + + + diff --git a/tests/data/examples/test_group/input.rst b/tests/data/examples/test_group/input.rst new file mode 100644 index 00000000..35fa6a6a --- /dev/null +++ b/tests/data/examples/test_group/input.rst @@ -0,0 +1,4 @@ +.. doxygengroup:: mygroup + :members: +.. doxygengroup:: innergroup + :members: diff --git a/tests/data/examples/test_headings/compare.xml b/tests/data/examples/test_headings/compare.xml index 61db6705..7ab8ba66 100644 --- a/tests/data/examples/test_headings/compare.xml +++ b/tests/data/examples/test_headings/compare.xml @@ -1,8 +1,9 @@ + - class HeadingsTest + class HeadingsTest This is a documentation. This is more documentation. diff --git a/tests/data/examples/test_param_dirs/compare.xml b/tests/data/examples/test_param_dirs/compare.xml new file mode 100644 index 00000000..ea412997 --- /dev/null +++ b/tests/data/examples/test_param_dirs/compare.xml @@ -0,0 +1,28 @@ + + + + + + int processvoid *ivoid *ovoid *io + + + + Parameters + + + + i[in] Input + + + o[out] Output + + + io[inout] Input and output + + + + + + + + diff --git a/tests/data/examples/test_param_dirs/input.rst b/tests/data/examples/test_param_dirs/input.rst new file mode 100644 index 00000000..94f81cdb --- /dev/null +++ b/tests/data/examples/test_param_dirs/input.rst @@ -0,0 +1 @@ +.. doxygenfunction:: process diff --git a/tests/data/examples/test_rst/compare.xml b/tests/data/examples/test_rst/compare.xml index 36f93979..1633fed0 100644 --- a/tests/data/examples/test_rst/compare.xml +++ b/tests/data/examples/test_rst/compare.xml @@ -1,14 +1,16 @@ + - class TestClass + class TestClass first class inside of namespace Public Functions + - virtual void function const = 0 + virtual void function const = 0 Inserting additional reStructuredText information. This is some funky non-XML compliant text: <& !>< @@ -20,8 +22,9 @@ + - virtual void rawVerbatim const = 0 + virtual void rawVerbatim const = 0 Inserting additional reStructuredText information. @@ -29,8 +32,9 @@ + - virtual void rawLeadingAsteriskVerbatim const = 0 + virtual void rawLeadingAsteriskVerbatim const = 0 Inserting additional reStructuredText information. Some example code:int example(int x) { @@ -38,8 +42,9 @@ } + - virtual void rawLeadingSlashesVerbatimint something const = 0 + virtual void rawLeadingSlashesVerbatimint something const = 0 Some kind of method. bool foo(bool something) { @@ -58,14 +63,16 @@ + - virtual void rawInlineVerbatim const = 0 + virtual void rawInlineVerbatim const = 0 Inserting an inline reStructuredText snippet. Linking to another function: TestClass::rawVerbatim() + - inline virtual void testFunction const + inline virtual void testFunction const Brief description. 
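The test_param_dirs expectation above ([in], [out], [inout] labels beside each parameter name) comes from the visit_docparamlist change earlier in this patch, which maps the parsed parser.DoxParamDir value directly to a label instead of re-parsing the rendered text. A minimal sketch of that mapping, with FakeDir standing in for parser.DoxParamDir and a plain string standing in for the nodes.strong wrapper the renderer actually builds:

import enum

class FakeDir(enum.Enum):
    # Stand-in for parser.DoxParamDir; the in_ spelling mirrors the patch,
    # which avoids the Python keyword "in".
    in_ = "in"
    out = "out"
    inout = "inout"

DIRECTION_LABELS = {
    FakeDir.in_: "[in]",
    FakeDir.out: "[out]",
    FakeDir.inout: "[inout]",
}

def direction_label(direction):
    # direction is None when the @param command carried no [in]/[out]/[in,out]
    # qualifier, in which case no label is emitted at all.
    return DIRECTION_LABELS[direction] if direction is not None else ""

assert direction_label(FakeDir.in_) == "[in]"
assert direction_label(FakeDir.inout) == "[inout]"
assert direction_label(None) == ""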
diff --git a/tests/data/examples/test_template_class_non_type/compare.xml b/tests/data/examples/test_template_class_non_type/compare.xml index aa345e5c..efa1afc7 100644 --- a/tests/data/examples/test_template_class_non_type/compare.xml +++ b/tests/data/examples/test_template_class_non_type/compare.xml @@ -1,8 +1,9 @@ + - template<typename T, typename U, int N>class anothertemplateclass + template<typename T, typename U, int N>class anothertemplateclass a class with three template parameters @@ -25,14 +26,16 @@ Public Functions + - inline anothertemplateclass + inline anothertemplateclass default constructor + - inline anothertemplateclassT const &m1U const &m2 + inline anothertemplateclassT const &m1U const &m2 constructor with two template argument @@ -52,8 +55,9 @@ + - U methodT const &t + U methodT const &t member accepting template argument and returning template argument diff --git a/tests/test_examples.py b/tests/test_examples.py index 2ba061bc..8cf7686b 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -29,7 +29,7 @@ """ C_FILE_SUFFIXES = frozenset(('.h', '.c', '.cpp')) -IGNORED_ELEMENTS = frozenset(('target', 'index')) +IGNORED_ELEMENTS = frozenset(()) BUFFER_SIZE = 0x1000 diff --git a/tests/test_filters.py b/tests/test_filters.py index 38dbfb36..08b8e55e 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -13,8 +13,7 @@ 'path': '', 'project': '', 'membergroups': '', - 'show': '', - 'undoc-members': None} + 'show': ''} @pytest.fixture(scope="module") def class_doc(): @@ -56,7 +55,7 @@ def create_class_filter(app, extra_ops): opts.update(extra_ops) return FilterFactory(app).create_class_filter('Sample', opts) -def test_no_class_members(app, members): +def test_members(app, members): app.config.breathe_default_members = [] filter = create_class_filter(app,{}) @@ -68,52 +67,35 @@ def test_no_class_members(app, members): assert not filter(members.private_field) assert not filter(members.private_method) -def test_public_class_members(app, members): - app.config.breathe_default_members = [] - - filter = create_class_filter(app,{'members': ''}) +bools = (True, False) - assert filter(members.public_field) - assert filter(members.public_method) - assert not filter(members.protected_field) - assert not filter(members.protected_method) - assert not filter(members.private_field) - assert not filter(members.private_method) - -def test_prot_class_members(app, members): +@pytest.mark.parametrize('public', bools) +@pytest.mark.parametrize('private', bools) +@pytest.mark.parametrize('protected', bools) +@pytest.mark.parametrize('undocumented', bools) +def test_public_class_members(app, members, public, private, protected, undocumented): app.config.breathe_default_members = [] - filter = create_class_filter(app,{ - 'members': '', - 'protected-members': None}) + opts = {} + if public: opts['members'] = None + if private: opts['private-members'] = None + if protected: opts['protected-members'] = None + if undocumented: opts['undoc-members'] = None + filter = create_class_filter(app,opts) - assert filter(members.public_field) - assert filter(members.public_method) - assert filter(members.protected_field) - assert filter(members.protected_method) - assert not filter(members.private_field) - assert not filter(members.private_method) - -def test_all_class_members(app, members): - app.config.breathe_default_members = [] - - filter = create_class_filter(app,{ - 'members': '', - 'protected-members': None, - 'private-members': None}) - - assert filter(members.public_field) - 
assert filter(members.public_method) - assert filter(members.protected_field) - assert filter(members.protected_method) - assert filter(members.private_field) - assert filter(members.private_method) + assert filter(members.public_field) == public + assert filter(members.public_method) == (public and undocumented) + assert filter(members.protected_field) == (protected and undocumented) + assert filter(members.protected_method) == protected + assert filter(members.private_field) == private + assert filter(members.private_method) == (private and undocumented) def test_specific_class_members(app, members): app.config.breathe_default_members = [] filter = create_class_filter(app,{ - 'members': 'public_method,protected_method,private_field'}) + 'members': 'public_method,protected_method,private_field', + 'undoc-members': None}) assert not filter(members.public_field) assert filter(members.public_method) From 23cdf28e71976758cb2677eb2a3e0732baf65a05 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sun, 10 Dec 2023 03:21:59 -0500 Subject: [PATCH 27/65] A few more fixes; a bunch more tests --- breathe/finder/__init__.py | 12 +- breathe/finder/compound.py | 24 +- breathe/finder/factory.py | 25 +- breathe/finder/index.py | 24 +- breathe/parser.py | 19 +- breathe/renderer/__init__.py | 9 +- breathe/renderer/filter.py | 6 +- breathe/renderer/sphinxrenderer.py | 520 ++++++++++++------ examples/specific/cpp_function.h | 2 + examples/specific/group_content_only.hpp | 17 + examples/specific/inheritance.h | 4 + mkrelease | 2 +- .../examples/test_cpp_function/compare.xml | 78 +++ .../data/examples/test_cpp_function/input.rst | 1 + .../test_group_content_only/compare.xml | 11 + .../test_group_content_only/input.rst | 2 + .../examples/test_inheritance/compare.xml | 58 ++ .../data/examples/test_inheritance/input.rst | 1 + tests/data/examples/test_inline/compare.xml | 49 ++ tests/data/examples/test_inline/input.rst | 2 + .../data/examples/test_latexmath/compare.xml | 15 + tests/data/examples/test_latexmath/input.rst | 1 + tests/data/examples/test_links/compare.xml | 12 + tests/data/examples/test_links/input.rst | 1 + tests/data/examples/test_lists/compare.xml | 103 ++++ tests/data/examples/test_lists/input.rst | 1 + .../examples/test_membergroups/compare.xml | 38 ++ .../data/examples/test_membergroups/input.rst | 2 + .../test_qtsignalsandslots/compare.xml | 61 ++ .../examples/test_qtsignalsandslots/input.rst | 1 + .../data/examples/test_simplesect/compare.xml | 49 ++ tests/data/examples/test_simplesect/input.rst | 1 + tests/data/examples/test_tables/compare.xml | 54 ++ tests/data/examples/test_tables/input.rst | 1 + .../test_template_function/compare.xml | 99 ++++ .../examples/test_template_function/input.rst | 1 + .../test_template_type_alias/compare.xml | 47 ++ .../test_template_type_alias/input.rst | 1 + tests/data/examples/test_union/compare.xml | 111 ++++ tests/data/examples/test_union/input.rst | 1 + .../examples/test_userdefined/compare.xml | 51 ++ .../data/examples/test_userdefined/input.rst | 3 + tests/test_examples.py | 16 +- xml_parser_generator/module_template.c | 32 +- xml_parser_generator/stubs_template.pyi | 9 +- 45 files changed, 1345 insertions(+), 232 deletions(-) create mode 100644 examples/specific/group_content_only.hpp create mode 100644 tests/data/examples/test_cpp_function/compare.xml create mode 100644 tests/data/examples/test_cpp_function/input.rst create mode 100644 tests/data/examples/test_group_content_only/compare.xml create mode 100644 
tests/data/examples/test_group_content_only/input.rst create mode 100644 tests/data/examples/test_inheritance/compare.xml create mode 100644 tests/data/examples/test_inheritance/input.rst create mode 100644 tests/data/examples/test_inline/compare.xml create mode 100644 tests/data/examples/test_inline/input.rst create mode 100644 tests/data/examples/test_latexmath/compare.xml create mode 100644 tests/data/examples/test_latexmath/input.rst create mode 100644 tests/data/examples/test_links/compare.xml create mode 100644 tests/data/examples/test_links/input.rst create mode 100644 tests/data/examples/test_lists/compare.xml create mode 100644 tests/data/examples/test_lists/input.rst create mode 100644 tests/data/examples/test_membergroups/compare.xml create mode 100644 tests/data/examples/test_membergroups/input.rst create mode 100644 tests/data/examples/test_qtsignalsandslots/compare.xml create mode 100644 tests/data/examples/test_qtsignalsandslots/input.rst create mode 100644 tests/data/examples/test_simplesect/compare.xml create mode 100644 tests/data/examples/test_simplesect/input.rst create mode 100644 tests/data/examples/test_tables/compare.xml create mode 100644 tests/data/examples/test_tables/input.rst create mode 100644 tests/data/examples/test_template_function/compare.xml create mode 100644 tests/data/examples/test_template_function/input.rst create mode 100644 tests/data/examples/test_template_type_alias/compare.xml create mode 100644 tests/data/examples/test_template_type_alias/input.rst create mode 100644 tests/data/examples/test_union/compare.xml create mode 100644 tests/data/examples/test_union/input.rst create mode 100644 tests/data/examples/test_userdefined/compare.xml create mode 100644 tests/data/examples/test_userdefined/input.rst diff --git a/breathe/finder/__init__.py b/breathe/finder/__init__.py index a7dc1f0b..87752173 100644 --- a/breathe/finder/__init__.py +++ b/breathe/finder/__init__.py @@ -6,14 +6,14 @@ from breathe.project import ProjectInfo from breathe.finder.factory import DoxygenItemFinderFactory from breathe.renderer.filter import DoxFilter - from breathe.renderer import TaggedNode + from breathe.renderer import TaggedNode, T_data_object +else: + T_data_object = TypeVar('T_data_object', covariant=True) -T = TypeVar('T', covariant=True) - -class ItemFinder(Generic[T]): - def __init__(self, project_info: ProjectInfo, data_object: T, item_finder_factory: DoxygenItemFinderFactory): - self.data_object = data_object +class ItemFinder(Generic[T_data_object]): + def __init__(self, project_info: ProjectInfo, node: TaggedNode[T_data_object], item_finder_factory: DoxygenItemFinderFactory): + self.node = node self.item_finder_factory: DoxygenItemFinderFactory = item_finder_factory self.project_info = project_info diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index 05ef8eb9..8fbb55a7 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -14,9 +14,9 @@ class DoxygenTypeSubItemFinder(ItemFinder[parser.Node_DoxygenType]): def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: """Find nodes which match the filter. 
Doesn't test this node, only its children""" - node_stack = [TaggedNode(None,self.data_object)] + ancestors - assert len(self.data_object.compounddef) == 1 - compound_finder = self.item_finder_factory.create_finder(self.data_object.compounddef[0]) + node_stack = [self.node] + ancestors + assert len(self.node.value.compounddef) == 1 + compound_finder = self.item_finder_factory.create_finder(self.node.value.compounddef[0]) compound_finder.filter_(node_stack, filter_, matches) @@ -24,16 +24,16 @@ class CompoundDefTypeSubItemFinder(ItemFinder[parser.Node_compounddefType]): def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: """Finds nodes which match the filter and continues checks to children""" - node_stack = [TaggedNode(None,self.data_object)] + ancestors + node_stack = [self.node] + ancestors if filter_(NodeStack(node_stack)): matches.append(node_stack) - for sectiondef in self.data_object.sectiondef: + for sectiondef in self.node.value.sectiondef: finder = self.item_finder_factory.create_finder(sectiondef) finder.filter_(node_stack, filter_, matches) - for innerclass in self.data_object.innerclass: - finder = self.item_finder_factory.create_finder(innerclass) + for innerclass in self.node.value.innerclass: + finder = self.item_finder_factory.create_finder(innerclass, "innerclass") finder.filter_(node_stack, filter_, matches) @@ -41,19 +41,19 @@ class SectionDefTypeSubItemFinder(ItemFinder[parser.Node_sectiondefType]): def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" - node_stack = [TaggedNode(None,self.data_object)] + ancestors + node_stack = [self.node] + ancestors if filter_(NodeStack(node_stack)): matches.append(node_stack) - for memberdef in self.data_object.memberdef: + for memberdef in self.node.value.memberdef: finder = self.item_finder_factory.create_finder(memberdef) finder.filter_(node_stack, filter_, matches) class MemberDefTypeSubItemFinder(ItemFinder[parser.Node_memberdefType]): def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: - data_object = self.data_object - node_stack = [TaggedNode(None,self.data_object)] + ancestors + data_object = self.node.value + node_stack = [self.node] + ancestors if filter_(NodeStack(node_stack)): matches.append(node_stack) @@ -67,6 +67,6 @@ def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]] class RefTypeSubItemFinder(ItemFinder[parser.Node_refType]): def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: - node_stack = [TaggedNode(None,self.data_object)] + ancestors + node_stack = [self.node] + ancestors if filter_(NodeStack(node_stack)): matches.append(node_stack) diff --git a/breathe/finder/factory.py b/breathe/finder/factory.py index 86da4f7a..f92dae8d 100644 --- a/breathe/finder/factory.py +++ b/breathe/finder/factory.py @@ -9,20 +9,21 @@ from sphinx.application import Sphinx -from typing import Any, Callable, TYPE_CHECKING +from typing import Callable, TYPE_CHECKING, Union if TYPE_CHECKING: from breathe.renderer.filter import DoxFilter - ItemFinderCreator = Callable[[ProjectInfo,Any,'DoxygenItemFinderFactory'],ItemFinder] + ItemFinderCreator = Callable[[ProjectInfo,TaggedNode,'DoxygenItemFinderFactory'],ItemFinder] - FinderRoot = (parser.Node_DoxygenTypeIndex - | parser.Node_CompoundType - | parser.Node_MemberType - | parser.Node_DoxygenType - | parser.Node_compounddefType - 
| parser.Node_sectiondefType - | parser.Node_memberdefType - | parser.Node_refType) + FinderRoot = Union[ + parser.Node_DoxygenTypeIndex, + parser.Node_CompoundType, + parser.Node_MemberType, + parser.Node_DoxygenType, + parser.Node_compounddefType, + parser.Node_sectiondefType, + parser.Node_memberdefType, + parser.Node_refType] class _CreateCompoundTypeSubFinder: @@ -40,8 +41,8 @@ def __init__(self, finders: dict[type[parser.NodeOrValue], ItemFinderCreator], p self.finders = finders self.project_info = project_info - def create_finder(self, data_object) -> ItemFinder: - return self.finders[type(data_object)](self.project_info, data_object, self) + def create_finder(self, data_object: parser.NodeOrValue, tag: str | None = None) -> ItemFinder: + return self.finders[type(data_object)](self.project_info, TaggedNode(tag, data_object), self) class Finder: diff --git a/breathe/finder/index.py b/breathe/finder/index.py index 9b690946..dde3e448 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -7,7 +7,7 @@ from sphinx.application import Sphinx -from typing import Any, TYPE_CHECKING +from typing import TYPE_CHECKING if TYPE_CHECKING: from breathe.renderer.filter import DoxFilter @@ -17,8 +17,8 @@ class DoxygenTypeSubItemFinder(ItemFinder[parser.Node_DoxygenTypeIndex]): def filter_(self, ancestors, filter_: DoxFilter, matches) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" - compounds = self.data_object.compound - node_stack = [TaggedNode(None,self.data_object)] + ancestors + compounds = self.node.value.compound + node_stack = [self.node] + ancestors for compound in compounds: compound_finder = self.item_finder_factory.create_finder(compound) compound_finder.filter_(node_stack, filter_, matches) @@ -39,16 +39,16 @@ def filter_(self, ancestors: list[TaggedNode], filter_: DoxFilter, matches) -> N top level node of the compound file. 
""" - node_stack = [TaggedNode(None,self.data_object)] + ancestors + node_stack = [self.node] + ancestors # Match against compound object if filter_(NodeStack(node_stack)): matches.append(node_stack) # Descend to member children - members = self.data_object.member - # TODO: find a more precise type for the Doxygen nodes - member_matches: list[Any] = [] + members = self.node.value.member + + member_matches: list[list[TaggedNode]] = [] for member in members: member_finder = self.item_finder_factory.create_finder(member) member_finder.filter_(node_stack, filter_, member_matches) @@ -56,11 +56,13 @@ def filter_(self, ancestors: list[TaggedNode], filter_: DoxFilter, matches) -> N # If there are members in this compound that match the criteria # then load up the file for this compound and get the member data objects if member_matches: - file_data = self.compound_parser.parse(self.data_object.refid) + file_data = self.compound_parser.parse(self.node.value.refid) finder = self.item_finder_factory.create_finder(file_data) for member_stack in member_matches: - refid = member_stack[0].value.refid + mem = member_stack[0].value + assert isinstance(mem, parser.Node_MemberType) + refid = mem.refid def ref_filter(nstack): node = nstack.node return isinstance(node,parser.Node_memberdefType) and node.id == refid @@ -68,14 +70,14 @@ def ref_filter(nstack): finder.filter_(node_stack, ref_filter, matches) else: # Read in the xml file referenced by the compound and descend into that as well - file_data = self.compound_parser.parse(self.data_object.refid) + file_data = self.compound_parser.parse(self.node.value.refid) finder = self.item_finder_factory.create_finder(file_data) finder.filter_(node_stack, filter_, matches) class MemberTypeSubItemFinder(ItemFinder[parser.Node_memberdefType]): def filter_(self, ancestors, filter_: DoxFilter, matches) -> None: - node_stack = [TaggedNode(None,self.data_object)] + ancestors + node_stack = [self.node] + ancestors # Match against member object if filter_(NodeStack(node_stack)): diff --git a/breathe/parser.py b/breathe/parser.py index 4551d0fe..483783b1 100644 --- a/breathe/parser.py +++ b/breathe/parser.py @@ -16,13 +16,26 @@ @reprlib.recursive_repr() def node_repr(self: Node) -> str: cls = type(self) - fields = ', '.join(f'{field}={getattr(self,field)!r}' for field in cls._fields) + fields = [] if isinstance(self,FrozenList): pos = ', '.join(map(repr,self)) - fields = f'[{pos}], {fields}' - return f'{cls.__name__}({fields})' + fields.append(f'[{pos}]') + fields.extend(f'{field}={getattr(self,field)!r}' for field in cls._fields) + inner = ', '.join(fields) + return f'{cls.__name__}({inner})' Node.__repr__ = node_repr # type: ignore +@reprlib.recursive_repr() +def taggedvalue_repr(self: TaggedValue) -> str: + return f'{self.__class__.__name__}({self.name!r}, {self.value!r})' +TaggedValue.__repr__ = taggedvalue_repr # type: ignore + +@reprlib.recursive_repr() +def frozenlist_repr(self: FrozenList) -> str: + inner = ', '.join(map(repr,self)) + return f'{self.__class__.__name__}([{inner}])' +FrozenList.__repr__ = frozenlist_repr # type: ignore + def description_has_content(node: Node_descriptionType | None) -> bool: if node is None: return False if bool(node.title) or len(node) > 1: return True diff --git a/breathe/renderer/__init__.py b/breathe/renderer/__init__.py index fd24796d..9b5fe5aa 100644 --- a/breathe/renderer/__init__.py +++ b/breathe/renderer/__init__.py @@ -2,7 +2,7 @@ from docutils import nodes import textwrap -from typing import NamedTuple, TYPE_CHECKING, Union 
+from typing import Generic, NamedTuple, TYPE_CHECKING, TypeVar, Union if TYPE_CHECKING: from breathe import parser @@ -10,6 +10,9 @@ from breathe.directives.index import RootDataObject DataObject = Union[parser.NodeOrValue, RootDataObject] + T_data_object = TypeVar('T_data_object', bound=DataObject, covariant = True) +else: + T_data_object = TypeVar('T_data_object', covariant = True) def format_parser_error(name: str, error: str, filename: str, state, lineno: int, do_unicode_warning: bool = False) -> list[nodes.Node]: @@ -44,9 +47,9 @@ def format_parser_error(name: str, error: str, filename: str, state, lineno: int ] -class TaggedNode(NamedTuple): +class TaggedNode(NamedTuple, Generic[T_data_object]): tag: str | None - value: DataObject + value: T_data_object class RenderContext: def __init__( diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 12c8b354..227669c1 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -98,8 +98,10 @@ def node(self) -> renderer.DataObject: return self.stack[0].value @property - def tag(self) -> str | None: - return self.stack[0].tag + def tag(self) -> str: + tag = self.stack[0].tag + assert tag is not None + return tag def path_matches(location: str, target_file: str) -> bool: diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index 5f98e389..ed8fd96d 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -18,7 +18,20 @@ import re import textwrap -from typing import Any, Callable, cast, ClassVar, Generic, Optional, Protocol, Type, TypeVar, TYPE_CHECKING, Union +from typing import ( + Any, + Callable, + cast, + ClassVar, + Generic, + Literal, + Optional, + Protocol, + Type, + TypeVar, + TYPE_CHECKING, + Union, +) from collections.abc import Iterable, Sequence @@ -35,7 +48,7 @@ cs = None -T = TypeVar('T') +T = TypeVar("T") if TYPE_CHECKING: from breathe.project import ProjectInfo @@ -49,11 +62,23 @@ class HasRefID(Protocol): @property - def refid(self) -> str: ... - + def refid(self) -> str: + ... + class HasTemplateParamList(Protocol): @property - def templateparamlist(self) -> parser.Node_templateparamlistType | None: ... + def templateparamlist(self) -> parser.Node_templateparamlistType | None: + ... + + class HasDescriptions(Protocol): + @property + def briefdescription(self) -> parser.Node_descriptionType | None: + ... + + @property + def detaileddescription(self) -> parser.Node_descriptionType | None: + ... + ContentCallback = Callable[[addnodes.desc_content], None] Declarator = Union[addnodes.desc_signature, addnodes.desc_signature_line] @@ -82,13 +107,13 @@ def __exit__(self, et, ev, bt): class BaseObject: # Use this class as the first base class to make sure the overrides are used. # Set the content_callback attribute to a function taking a docutils node. 
+ breathe_content_callback: ContentCallback | None = None def transform_content(self, contentnode: addnodes.desc_content) -> None: super().transform_content(contentnode) # type: ignore - callback = getattr(self, "breathe_content_callback", None) - if callback is None: + if self.breathe_content_callback is None: return - callback(contentnode) + self.breathe_content_callback(contentnode) # ---------------------------------------------------------------------------- @@ -499,67 +524,77 @@ def inlinetext(self, match, context, next_state): self.parent += msg return [], next_state, [] + def get_content(node: parser.Node_docParaType): # Add programlisting nodes to content rather than a separate list, # because programlisting and content nodes can interleave as shown in # https://www.stack.nl/~dimitri/doxygen/manual/examples/include/html/example.html. - return (item - for item in node - if parser.tag_name_value(item)[0] not in {'parameterlist','simplesect','image'}) + return ( + item + for item in node + if parser.tag_name_value(item)[0] not in {"parameterlist", "simplesect", "image"} + ) + def get_parameterlists(node: parser.Node_docParaType): - return (value - for name,value in map(parser.tag_name_value,node) - if name == 'parameterlist') + return (value for name, value in map(parser.tag_name_value, node) if name == "parameterlist") + def get_simplesects(node: parser.Node_docParaType): - return (value - for name,value in map(parser.tag_name_value,node) - if name == 'simplesect') + return (value for name, value in map(parser.tag_name_value, node) if name == "simplesect") + def get_images(node: parser.Node_docParaType): - return (value - for name,value in map(parser.tag_name_value,node) - if name == 'image') + return (value for name, value in map(parser.tag_name_value, node) if name == "image") class NodeHandler(Generic[T]): """Dummy callable that associates a set of nodes to a function. This gets unwrapped by NodeVisitor and is never actually called.""" - def __init__(self,handler: Callable[[SphinxRenderer, T], list[Node]]): + def __init__(self, handler: Callable[[SphinxRenderer, T], list[Node]]): self.handler = handler self.nodes: set[type[parser.NodeOrValue]] = set() - def __call__(self, r: SphinxRenderer, node: T, /) -> list[Node]: + def __call__(self, r: SphinxRenderer, node: T, /) -> list[Node]: # pragma: no cover raise TypeError() + class TaggedNodeHandler(Generic[T]): """Dummy callable that associates a set of nodes to a function. 
This gets unwrapped by NodeVisitor and is never actually called.""" - def __init__(self,handler: Callable[[SphinxRenderer, str, T], list[Node]]): + def __init__(self, handler: Callable[[SphinxRenderer, str, T], list[Node]]): self.handler = handler self.nodes: set[type[parser.NodeOrValue]] = set() - def __call__(self, r: SphinxRenderer, tag: str, node: T, /) -> list[Node]: + def __call__(self, r: SphinxRenderer, tag: str, node: T, /) -> list[Node]: # pragma: no cover raise TypeError() + def node_handler(node: type[parser.NodeOrValue]): - def inner(f: Callable[[SphinxRenderer, T], list[Node]]) -> Callable[[SphinxRenderer, T], list[Node]]: - handler: NodeHandler = f if isinstance(f,NodeHandler) else NodeHandler(f) + def inner( + f: Callable[[SphinxRenderer, T], list[Node]] + ) -> Callable[[SphinxRenderer, T], list[Node]]: + handler: NodeHandler = f if isinstance(f, NodeHandler) else NodeHandler(f) handler.nodes.add(node) return handler + return inner + def tagged_node_handler(node: type[parser.NodeOrValue]): - def inner(f: Callable[[SphinxRenderer, str, T], list[Node]]) -> Callable[[SphinxRenderer, str, T], list[Node]]: - handler: TaggedNodeHandler = f if isinstance(f,TaggedNodeHandler) else TaggedNodeHandler(f) + def inner( + f: Callable[[SphinxRenderer, str, T], list[Node]] + ) -> Callable[[SphinxRenderer, str, T], list[Node]]: + handler: TaggedNodeHandler = f if isinstance(f, TaggedNodeHandler) else TaggedNodeHandler(f) handler.nodes.add(node) return handler + return inner + class NodeVisitor(type): """Metaclass that collects all methods marked as @node_handler and @tagged_node_handler into the dicts 'node_handlers' and @@ -578,20 +613,41 @@ def __new__(cls, name, bases, members): for n in value.nodes: tagged_handlers[n] = value.handler members[key] = value.handler - - members['node_handlers'] = handlers - members['tagged_node_handlers'] = tagged_handlers + + members["node_handlers"] = handlers + members["tagged_node_handlers"] = tagged_handlers return type.__new__(cls, name, bases, members) + +# class RenderDebugPrint: +# def __init__(self,renderer,node): +# self.renderer = renderer +# renderer._debug_print_depth = 1 + getattr(renderer,'_debug_print_depth',0) +# print(' '*renderer._debug_print_depth,type(node)) +# +# def __enter__(self): +# return self +# +# def __exit__(self,*args): +# self.renderer._debug_print_depth -= 1 + + class SphinxRenderer(metaclass=NodeVisitor): """ Doxygen node visitor that converts input into Sphinx/RST representation. Each visit method takes a Doxygen node as an argument and returns a list of RST nodes. """ - node_handlers: ClassVar[dict[type[parser.NodeOrValue], Callable[[SphinxRenderer, parser.NodeOrValue], list[Node]]]] - tagged_node_handlers: ClassVar[dict[type[parser.NodeOrValue], Callable[[SphinxRenderer, str, parser.NodeOrValue], list[Node]]]] + node_handlers: ClassVar[ + dict[type[parser.NodeOrValue], Callable[[SphinxRenderer, parser.NodeOrValue], list[Node]]] + ] + tagged_node_handlers: ClassVar[ + dict[ + type[parser.NodeOrValue], + Callable[[SphinxRenderer, str, parser.NodeOrValue], list[Node]], + ] + ] def __init__( self, @@ -683,10 +739,10 @@ def get_filename(node) -> Optional[str]: node = node_stack[0].value # An enumvalueType node doesn't have location, so use its parent node # for detecting the domain instead. 
- if isinstance(node, (str,parser.Node_enumvalueType)): + if isinstance(node, (str, parser.Node_enumvalueType)): node = node_stack[1].value filename = get_filename(node) - if not filename and isinstance(node,parser.Node_CompoundType): + if not filename and isinstance(node, parser.Node_CompoundType): file_data = self.compound_parser.parse(node.refid) filename = get_filename(file_data.compounddef) return self.project_info.domain_for_file(filename) if filename else "" @@ -702,8 +758,8 @@ def run_directive( assert self.context is not None args = [obj_type, [declaration]] + self.context.directive_args[2:] directive = DomainDirectiveFactory.create(self.context.domain, args) - assert issubclass(type(directive), BaseObject) - directive.breathe_content_callback = contentCallback # type: ignore + assert isinstance(directive, BaseObject) + directive.breathe_content_callback = contentCallback # Translate Breathe's no-link option into the standard noindex option. if "no-link" in self.context.directive_args[2]: @@ -714,7 +770,7 @@ def run_directive( assert self.app.env is not None config = self.app.env.config - if config.breathe_debug_trace_directives: + if config.breathe_debug_trace_directives: # pragma: no cover global _debug_indent print( "{}Running directive: .. {}:: {}".format( @@ -733,7 +789,7 @@ def run_directive( for k, v in options.items(): del directive.options[k] - if config.breathe_debug_trace_directives: + if config.breathe_debug_trace_directives: # pragma: no cover _debug_indent -= 1 # Filter out outer class names if we are rendering a member as a part of a class content. @@ -755,9 +811,32 @@ def run_directive( signode.children = [n for n in signode.children if not n.tagname == "desc_addname"] return nodes + def handle_compounddef_declaration( + self, + node: parser.Node_compounddefType, + obj_type: str, + declaration: str, + file_data, + new_context, + parent_context, + display_obj_type: str | None = None, + ) -> list[Node]: + def content(contentnode) -> None: + if node.includes: + for include in node.includes: + contentnode.extend( + self.render(include, new_context.create_child_context(include)) + ) + rendered_data = self.render(file_data, parent_context) + contentnode.extend(rendered_data) + + return self.handle_declaration( + node, obj_type, declaration, content_callback=content, display_obj_type=display_obj_type + ) + def handle_declaration( self, - node, + node: parser.Node_compounddefType | parser.Node_memberdefType | parser.Node_enumvalueType, obj_type: str, declaration: str, *, @@ -768,7 +847,7 @@ def handle_declaration( ) -> list[Node]: if content_callback is None: - def content(contentnode): + def content(contentnode: addnodes.desc_content): contentnode.extend(self.description(node)) content_callback = content @@ -798,7 +877,7 @@ def content(contentnode): assert isinstance(sig, addnodes.desc_signature) # if may or may not be a multiline signature isMultiline = sig.get("is_multiline", False) - declarator: Optional[Declarator] = None + declarator: Declarator | None = None if isMultiline: for line in sig: assert isinstance(line, addnodes.desc_signature_line) @@ -851,14 +930,22 @@ def debug_print_node(n): names: list[str] = [] for node in self.qualification_stack[1:]: if config.breathe_debug_trace_qualification: - print("{}{}".format(_debug_indent * " ", debug_print_node(node))) # pyright: ignore - if isinstance(node,parser.Node_refType) and len(names) == 0: + print( + "{}{}".format(_debug_indent * " ", debug_print_node(node)) # pyright: ignore + ) + if isinstance(node, 
parser.Node_refType) and len(names) == 0: if config.breathe_debug_trace_qualification: print("{}{}".format(_debug_indent * " ", "res=")) return [] if ( - isinstance(node,parser.Node_CompoundType) and node.kind not in [parser.CompoundKind.file, parser.CompoundKind.namespace, parser.CompoundKind.group] - ) or isinstance(node,parser.Node_memberdefType): + isinstance(node, parser.Node_CompoundType) + and node.kind + not in [ + parser.CompoundKind.file, + parser.CompoundKind.namespace, + parser.CompoundKind.group, + ] + ) or isinstance(node, parser.Node_memberdefType): # We skip the 'file' entries because the file name doesn't form part of the # qualified name for the identifier. We skip the 'namespace' entries because if we # find an object through the namespace 'compound' entry in the index.xml then we'll @@ -866,7 +953,10 @@ def debug_print_node(n): # need the 'compounddef' entry because if we find the object through the 'file' # entry in the index.xml file then we need to get the namespace name from somewhere names.append(node.name) - if isinstance(node,parser.Node_compounddefType) and node.kind == parser.DoxCompoundKind.namespace: + if ( + isinstance(node, parser.Node_compounddefType) + and node.kind == parser.DoxCompoundKind.namespace + ): # Nested namespaces include their parent namespace(s) in compoundname. ie, # compoundname is 'foo::bar' instead of just 'bar' for namespace 'bar' nested in # namespace 'foo'. We need full compoundname because node_stack doesn't necessarily @@ -890,16 +980,25 @@ def get_fully_qualified_name(self): node = node_stack[0] # If the node is a namespace, use its name because namespaces are skipped in the main loop. - if isinstance(node.value,parser.Node_CompoundType) and node.value.kind == parser.CompoundKind.namespace: + if ( + isinstance(node.value, parser.Node_CompoundType) + and node.value.kind == parser.CompoundKind.namespace + ): names.append(node.value.name) for tval in node_stack: node = tval.value - if isinstance(node,parser.Node_refType) and len(names) == 0: - return ''.join(node) + if isinstance(node, parser.Node_refType) and len(names) == 0: + return "".join(node) if ( - isinstance(node,parser.Node_CompoundType) and node.kind not in [parser.CompoundKind.file, parser.CompoundKind.namespace, parser.CompoundKind.group] - ) or isinstance(node,parser.Node_memberdefType): + isinstance(node, parser.Node_CompoundType) + and node.kind + not in [ + parser.CompoundKind.file, + parser.CompoundKind.namespace, + parser.CompoundKind.group, + ] + ) or isinstance(node, parser.Node_memberdefType): # We skip the 'file' entries because the file name doesn't form part of the # qualified name for the identifier. We skip the 'namespace' entries because if we # find an object through the namespace 'compound' entry in the index.xml then we'll @@ -907,7 +1006,10 @@ def get_fully_qualified_name(self): # need the 'compounddef' entry because if we find the object through the 'file' # entry in the index.xml file then we need to get the namespace name from somewhere names.insert(0, node.name) - if isinstance(node,parser.Node_compounddefType) and node.kind == parser.DoxCompoundKind.namespace: + if ( + isinstance(node, parser.Node_compounddefType) + and node.kind == parser.DoxCompoundKind.namespace + ): # Nested namespaces include their parent namespace(s) in compoundname. ie, # compoundname is 'foo::bar' instead of just 'bar' for namespace 'bar' nested in # namespace 'foo'. 
We need full compoundname because node_stack doesn't necessarily @@ -935,7 +1037,7 @@ def run_domain_directive(self, kind, names): domain_directive.options["noindex"] = True config = self.app.env.config - if config.breathe_debug_trace_directives: + if config.breathe_debug_trace_directives: # pragma: no cover global _debug_indent print( "{}Running directive (old): .. {}:: {}".format( @@ -946,7 +1048,7 @@ def run_domain_directive(self, kind, names): nodes = domain_directive.run() - if config.breathe_debug_trace_directives: + if config.breathe_debug_trace_directives: # pragma: no cover _debug_indent -= 1 # Filter out outer class names if we are rendering a member as a part of a class content. @@ -983,13 +1085,48 @@ def title(self, node) -> list[Node]: nodes_.append(addnodes.desc_name(text=node.name)) return nodes_ - def description(self, node) -> list[Node]: + def description(self, node: HasDescriptions) -> list[Node]: brief = self.render_optional(node.briefdescription) - detailed = self.detaileddescription(node) + descr = node.detaileddescription + if isinstance(node, parser.Node_memberdefType): + params = [] + for p in node.param: + if p.briefdescription: + params.append( + parser.Node_docParamListItem( + parameterdescription=p.briefdescription, + parameternamelist=[ + parser.Node_docParamNameList( + parametername=[parser.Node_docParamName([p.declname or ""])] + ) + ], + ) + ) + + if params: + content: list[parser.ListItem_descriptionType] = [] + content.append( + parser.TaggedValue[Literal["para"], parser.Node_docParaType]( + "para", + parser.Node_docParaType( + [ + parser.TaggedValue[Literal["parameterlist"], parser.Node_docParamListType]( + "parameterlist", parser.Node_docParamListType(params, kind=parser.DoxParamListKind.param) + ) + ] + ), + ) + ) + title = None + if descr is not None: + content.extend(descr) + title = descr.title + descr = parser.Node_descriptionType(content, title=title) + detailed = self.detaileddescription(descr) return brief + detailed - def detaileddescription(self, node) -> list[Node]: - detailedCand = self.render_optional(node.detaileddescription) + def detaileddescription(self, descr: parser.Node_descriptionType | None) -> list[Node]: + detailedCand = self.render_optional(descr) # all field_lists must be at the top-level of the desc_content, so pull them up fieldLists: list[nodes.field_list] = [] admonitions: list[Node] = [] @@ -1014,7 +1151,7 @@ def pullup(node, typ, dest): para.replace_self(para.children) # and remove empty top-level paragraphs - if isinstance(candNode, nodes.paragraph) and len(candNode) == 0: # pyright: ignore + if isinstance(candNode, nodes.paragraph) and len(candNode) == 0: # pyright: ignore continue detailed.append(candNode) @@ -1026,7 +1163,7 @@ def pullup(node, typ, dest): fieldLists = [fieldList] # collapse retvals into a single return field - if len(fieldLists) != 0 and sphinx.version_info[0:2] < (4, 3): # pyright: ignore + if len(fieldLists) != 0 and sphinx.version_info[0:2] < (4, 3): # pyright: ignore others: list[nodes.field] = [] retvals: list[nodes.field] = [] f: nodes.field @@ -1080,7 +1217,9 @@ def update_signature(self, signature, obj_type): else: signature.insert(0, annotation) - def render_declaration(self, node: parser.Node_memberdefType, declaration=None, description=None, **kwargs): + def render_declaration( + self, node: parser.Node_memberdefType, declaration=None, description=None, **kwargs + ): if declaration is None: declaration = self.get_fully_qualified_name() obj_type = kwargs.get("objtype", None) @@ -1150,16 
+1289,9 @@ def visit_union(self, node: HasRefID) -> list[Node]: names.append(nodeDef.compoundname.split("::")[-1]) declaration = self.join_nested_name(names) - def content(contentnode): - if nodeDef.includes: - for include in nodeDef.includes: - contentnode.extend( - self.render(include, new_context.create_child_context(include)) - ) - rendered_data = self.render(file_data, parent_context) - contentnode.extend(rendered_data) - - nodes = self.handle_declaration(nodeDef, nodeDef.kind.value, declaration, content_callback=content) + nodes = self.handle_compounddef_declaration( + nodeDef, nodeDef.kind.value, declaration, file_data, new_context, parent_context + ) return nodes def visit_class(self, node: HasRefID) -> list[Node]: @@ -1202,24 +1334,25 @@ def visit_class(self, node: HasRefID) -> list[Node]: first = False if base.prot is not None and domain != "cs": decls.append(base.prot.value) - if base.virt == "virtual": + if base.virt == parser.DoxVirtualKind.virtual: decls.append("virtual") decls.append(base[0]) declaration = " ".join(decls) - def content(contentnode) -> None: - if nodeDef.includes: - for include in nodeDef.includes: - contentnode.extend( - self.render(include, new_context.create_child_context(include)) - ) - rendered_data = self.render(file_data, parent_context) - contentnode.extend(rendered_data) - - assert kind in (parser.DoxCompoundKind.class_, parser.DoxCompoundKind.struct, parser.DoxCompoundKind.interface) + assert kind in ( + parser.DoxCompoundKind.class_, + parser.DoxCompoundKind.struct, + parser.DoxCompoundKind.interface, + ) display_obj_type = "interface" if kind == parser.DoxCompoundKind.interface else None - nodes = self.handle_declaration( - nodeDef, nodeDef.kind.value, declaration, content_callback=content, display_obj_type=display_obj_type + nodes = self.handle_compounddef_declaration( + nodeDef, + nodeDef.kind.value, + declaration, + file_data, + new_context, + parent_context, + display_obj_type, ) if "members-only" in self.context.directive_args[2]: assert len(nodes) >= 2 @@ -1249,37 +1382,39 @@ def visit_namespace(self, node: HasRefID) -> list[Node]: names.append(nodeDef.compoundname.split("::")[-1]) declaration = self.join_nested_name(names) - def content(contentnode): - if nodeDef.includes: - for include in nodeDef.includes: - contentnode.extend( - self.render(include, new_context.create_child_context(include)) - ) - rendered_data = self.render(file_data, parent_context) - contentnode.extend(rendered_data) - display_obj_type = "namespace" if self.get_domain() != "py" else "module" - nodes = self.handle_declaration( - nodeDef, nodeDef.kind.value, declaration, content_callback=content, display_obj_type=display_obj_type + nodes = self.handle_compounddef_declaration( + nodeDef, + nodeDef.kind.value, + declaration, + file_data, + new_context, + parent_context, + display_obj_type, ) return nodes def visit_compound( - self, - node: HasRefID, - render_empty_node=True, - *, - get_node_info: Callable[[parser.Node_DoxygenType], tuple[str, parser.DoxCompoundKind]] | None = None, - render_signature: Callable[ - [parser.Node_DoxygenType,Sequence[Element],str,parser.DoxCompoundKind], - tuple[list[Node],addnodes.desc_content]] | None = None) -> list[Node]: + self, + node: HasRefID, + render_empty_node=True, + *, + get_node_info: Callable[[parser.Node_DoxygenType], tuple[str, parser.DoxCompoundKind]] + | None = None, + render_signature: Callable[ + [parser.Node_DoxygenType, Sequence[Element], str, parser.DoxCompoundKind], + tuple[list[Node], addnodes.desc_content], + ] + | 
None = None, + ) -> list[Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) assert len(file_data.compounddef) == 1 def def_get_node_info(file_data) -> tuple[str, parser.DoxCompoundKind]: - assert isinstance(node,parser.Node_CompoundType) + assert isinstance(node, parser.Node_CompoundType) return node.name, parser.DoxCompoundKind(node.kind.value) + if get_node_info is None: get_node_info = def_get_node_info @@ -1288,7 +1423,11 @@ def def_get_node_info(file_data) -> tuple[str, parser.DoxCompoundKind]: dom = self.get_domain() assert not dom or dom in ("c", "cpp") return self.visit_union(node) - elif kind in (parser.DoxCompoundKind.struct, parser.DoxCompoundKind.class_, parser.DoxCompoundKind.interface): + elif kind in ( + parser.DoxCompoundKind.struct, + parser.DoxCompoundKind.class_, + parser.DoxCompoundKind.interface, + ): dom = self.get_domain() if not dom or dom in ("c", "cpp", "py", "cs"): return self.visit_class(node) @@ -1306,10 +1445,8 @@ def def_get_node_info(file_data) -> tuple[str, parser.DoxCompoundKind]: return [] def def_render_signature( - file_data: parser.Node_DoxygenType, - doxygen_target, - name, - kind: parser.DoxCompoundKind) -> tuple[list[Node],addnodes.desc_content]: + file_data: parser.Node_DoxygenType, doxygen_target, name, kind: parser.DoxCompoundKind + ) -> tuple[list[Node], addnodes.desc_content]: # Defer to domains specific directive. assert len(file_data.compounddef) == 1 @@ -1351,6 +1488,7 @@ def def_render_signature( rst_node.children[0].insert(0, doxygen_target) return nodes, finder.content + if render_signature is None: render_signature = def_render_signature @@ -1370,7 +1508,9 @@ def def_render_signature( return nodes def visit_file(self, node: parser.Node_CompoundType) -> list[Node]: - def render_signature(file_data, doxygen_target, name, kind) -> tuple[list[Node],addnodes.desc_content]: + def render_signature( + file_data, doxygen_target, name, kind + ) -> tuple[list[Node], addnodes.desc_content]: assert self.context is not None options = self.context.directive_args[2] @@ -1444,19 +1584,23 @@ def render_signature(file_data, doxygen_target, name, kind) -> tuple[list[Node], (parser.DoxSectionKind.define, "Defines"), (parser.DoxSectionKind.prototype, "Prototypes"), (parser.DoxSectionKind.typedef, "Typedefs"), - #(parser.DoxSectionKind.concept, "Concepts"), + # (parser.DoxSectionKind.concept, "Concepts"), (parser.DoxSectionKind.enum, "Enums"), (parser.DoxSectionKind.func, "Functions"), (parser.DoxSectionKind.var, "Variables"), ] - def render_iterable(self, iterable: Iterable[parser.NodeOrValue], tag: str | None = None) -> list[Node]: + def render_iterable( + self, iterable: Iterable[parser.NodeOrValue], tag: str | None = None + ) -> list[Node]: output: list[Node] = [] for entry in iterable: output.extend(self.render(entry, tag=tag)) return output - - def render_tagged_iterable(self, iterable: Iterable[parser.TaggedValue[str,parser.NodeOrValue] | str]) -> list[Node]: + + def render_tagged_iterable( + self, iterable: Iterable[parser.TaggedValue[str, parser.NodeOrValue] | str] + ) -> list[Node]: output: list[Node] = [] for entry in iterable: output.extend(self.render_tagged(entry)) @@ -1484,16 +1628,16 @@ def addnode(kind: str, lam): if "allow-dot-graphs" in options: addnode("incdepgraph", lambda: self.render_optional(node.incdepgraph)) addnode("invincdepgraph", lambda: self.render_optional(node.invincdepgraph)) - addnode( - "inheritancegraph", lambda: self.render_optional(node.inheritancegraph) - ) + 
addnode("inheritancegraph", lambda: self.render_optional(node.inheritancegraph)) addnode( "collaborationgraph", lambda: self.render_optional(node.collaborationgraph), ) addnode("briefdescription", lambda: self.render_optional(node.briefdescription)) - addnode("detaileddescription", lambda: self.detaileddescription(node)) + addnode( + "detaileddescription", lambda: self.detaileddescription(node.detaileddescription) + ) def render_derivedcompoundref(node): if node is None: @@ -1539,8 +1683,10 @@ def render_derivedcompoundref(node): addnode(kind.value, lambda: section_nodelists.get(kind.value, [])) # Take care of innerclasses - addnode("innerclass", lambda: self.render_iterable(node.innerclass, 'innerclass')) - addnode("innernamespace", lambda: self.render_iterable(node.innernamespace, 'innernamespace')) + addnode("innerclass", lambda: self.render_iterable(node.innerclass, "innerclass")) + addnode( + "innernamespace", lambda: self.render_iterable(node.innernamespace, "innernamespace") + ) if "inner" in options: for cnode in node.innergroup: @@ -1601,21 +1747,23 @@ def visit_sectiondef(self, node: parser.Node_sectiondefType) -> list[Node]: @node_handler(parser.Node_docRefTextType) @node_handler(parser.Node_refTextType) - def visit_docreftext(self, node: parser.Node_docRefTextType | parser.Node_incType | parser.Node_refTextType) -> list[Node]: + def visit_docreftext( + self, node: parser.Node_docRefTextType | parser.Node_incType | parser.Node_refTextType + ) -> list[Node]: nodelist: list[Node] - if isinstance(node,parser.Node_incType): + if isinstance(node, parser.Node_incType): nodelist = self.render_iterable(node) else: nodelist = self.render_tagged_iterable(node) # TODO: "para" in compound.xsd is an empty tag; figure out what this # is supposed to do - for name,value in map(parser.tag_name_value,node): - if name == 'para': + for name, value in map(parser.tag_name_value, node): + if name == "para": nodelist.extend(self.render(value)) - refid = self.get_refid(node.refid or '') + refid = self.get_refid(node.refid or "") assert nodelist nodelist = [ @@ -1728,7 +1876,7 @@ def visit_docimage(self, node: parser.Node_docImageType) -> list[Node]: path_to_image = node.name if path_to_image is None: - path_to_image = '' + path_to_image = "" elif not url_re.match(path_to_image): path_to_image = self.project_info.sphinx_abs_path_to_file(path_to_image) @@ -1765,7 +1913,9 @@ def visit_docmarkup(self, tag: str, node: parser.Node_docMarkupType) -> list[Nod @node_handler(parser.Node_docSect1Type) @node_handler(parser.Node_docSect2Type) @node_handler(parser.Node_docSect3Type) - def visit_docsectN(self, node: parser.Node_docSect1Type | parser.Node_docSect2Type | parser.Node_docSect3Type) -> list[Node]: + def visit_docsectN( + self, node: parser.Node_docSect1Type | parser.Node_docSect2Type | parser.Node_docSect3Type + ) -> list[Node]: """ Docutils titles are defined by their level inside the document so the proper structure is only guaranteed by the Doxygen XML. 
@@ -1775,7 +1925,7 @@ def visit_docsectN(self, node: parser.Node_docSect1Type | parser.Node_docSect2Ty """ section = nodes.section() section["ids"].append(self.get_refid(node.id)) - title = node.title or '' + title = node.title or "" section += nodes.title(title, title) section += self.create_doxygen_target(node) section += self.render_tagged_iterable(node) @@ -1789,7 +1939,11 @@ def visit_docsimplesect(self, node: parser.Node_docSimpleSectType) -> list[Node] # and it will be pulled up later nodelist = self.render_iterable(node.para) - if node.kind in (parser.DoxSimpleSectKind.pre, parser.DoxSimpleSectKind.post, parser.DoxSimpleSectKind.return_): + if node.kind in ( + parser.DoxSimpleSectKind.pre, + parser.DoxSimpleSectKind.post, + parser.DoxSimpleSectKind.return_, + ): return [ nodes.field_list( "", @@ -1860,11 +2014,13 @@ def visit_listing(self, node: parser.Node_listingType) -> list[Node]: # Add blank string at the start otherwise for some reason it renders # the pending_xref tags around the kind in plain text block = nodes.literal_block("", "", *nodelist) - domain = filetypes.get_pygments_alias(node.filename or '') or filetypes.get_extension(node.filename or '') + domain = filetypes.get_pygments_alias(node.filename or "") or filetypes.get_extension( + node.filename or "" + ) if domain: block["language"] = domain return [block] - + @node_handler(parser.Node_codelineType) def visit_codeline(self, node: parser.Node_codelineType) -> list[Node]: return self.render_iterable(node.highlight) @@ -1963,7 +2119,7 @@ def visit_inc(self, node: parser.Node_incType) -> list[Node]: if not self.app.config.breathe_show_include: return [] - compound_link: list[Node] = [nodes.Text(''.join(node))] + compound_link: list[Node] = [nodes.Text("".join(node))] if node.refid: compound_link = self.visit_docreftext(node) if node.local == "yes": @@ -1976,7 +2132,7 @@ def visit_inc(self, node: parser.Node_incType) -> list[Node]: @node_handler(parser.Node_refType) def visit_ref(self, node: parser.Node_refType) -> list[Node]: def get_node_info(file_data: parser.Node_DoxygenType): - name = ''.join(node) + name = "".join(node) name = name.rsplit("::", 1)[-1] assert len(file_data.compounddef) == 1 return name, file_data.compounddef[0].kind @@ -2166,14 +2322,14 @@ def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: names.append(node.name) name = self.join_nested_name(names) if dom == "py": - declaration = name + (node.argsstring or '') + declaration = name + (node.argsstring or "") elif dom == "cs": declaration = " ".join( [ self.create_template_prefix(node), - "".join(cast(str,n.astext()) for n in self.render(node.type)), + "".join(cast(str, n.astext()) for n in self.render(node.type)), name, - node.argsstring or '', + node.argsstring or "", ] ) else: @@ -2201,7 +2357,7 @@ def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: typ = typ[7:] elements.append(typ) elements.append(name) - elements.append(node.argsstring or '') + elements.append(node.argsstring or "") declaration = " ".join(elements) nodes = self.handle_declaration(node, node.kind.value, declaration) return nodes @@ -2276,7 +2432,8 @@ def visit_define(self, node: parser.Node_memberdefType) -> list[Node]: for i, parameter in enumerate(node.param): if i: declaration += ", " - if parameter.defname: declaration += parameter.defname + if parameter.defname: + declaration += parameter.defname declaration += ")" # TODO: remove this once Sphinx supports definitions for macros @@ -2285,7 +2442,9 @@ def add_definition(declarator: 
Declarator) -> None: declarator.append(nodes.Text(" ")) declarator.extend(self.render(node.initializer)) - return self.handle_declaration(node, node.kind.value, declaration, declarator_callback=add_definition) + return self.handle_declaration( + node, node.kind.value, declaration, declarator_callback=add_definition + ) def visit_enum(self, node: parser.Node_memberdefType) -> list[Node]: def content(contentnode): @@ -2311,9 +2470,7 @@ def content(contentnode): declaration += underlying_type else: obj_type = "enum" - return self.handle_declaration( - node, obj_type, declaration, content_callback=content - ) + return self.handle_declaration(node, obj_type, declaration, content_callback=content) @node_handler(parser.Node_enumvalueType) def visit_enumvalue(self, node: parser.Node_enumvalueType) -> list[Node]: @@ -2321,7 +2478,11 @@ def visit_enumvalue(self, node: parser.Node_enumvalueType) -> list[Node]: declaration = node.name + self.make_initializer(node) else: declaration = node.name - return self.handle_declaration(node, "enumvalue", declaration) + + def content(contentnode: addnodes.desc_content): + contentnode.extend(self.description(node)) + + return self.handle_declaration(node, "enumvalue", declaration, content_callback=content) def visit_typedef(self, node: parser.Node_memberdefType) -> list[Node]: type_ = "".join(n.astext() for n in self.render(node.type)) @@ -2339,7 +2500,7 @@ def visit_typedef(self, node: parser.Node_memberdefType) -> list[Node]: # contain the full text. If a @typedef was used instead, the # definition has only the typename, which makes it impossible to # distinguish between them so fallback to "typedef" behavior here. - declaration = " ".join([type_, name, node.argsstring or '']) + declaration = " ".join([type_, name, node.argsstring or ""]) return self.handle_declaration(node, node.kind.value, declaration) def make_initializer(self, node) -> str: @@ -2374,7 +2535,7 @@ def visit_variable(self, node: parser.Node_memberdefType) -> list[Node]: self.create_template_prefix(node), "".join(n.astext() for n in self.render(node.type)), name, - node.argsstring or '', + node.argsstring or "", ] ) if node.gettable or node.settable: @@ -2399,7 +2560,7 @@ def visit_variable(self, node: parser.Node_memberdefType) -> list[Node]: typename = typename.replace("::", ".") elements.append(typename) elements.append(name) - elements.append(node.argsstring or '') + elements.append(node.argsstring or "") elements.append(self.make_initializer(node)) declaration = " ".join(elements) if not dom or dom in ("c", "cpp", "py", "cs"): @@ -2457,7 +2618,7 @@ def visit_templateparam( dom = "cpp" appendDeclName = True if insertDeclNameByParsing: - if dom == "cpp" and sphinx.version_info >= (4, 1, 0): # pyright: ignore + if dom == "cpp" and sphinx.version_info >= (4, 1, 0): # pyright: ignore parser = cpp.DefinitionParser( "".join(n.astext() for n in nodelist), location=self.state.state_machine.get_source_and_line(), @@ -2479,7 +2640,7 @@ def visit_templateparam( # the actual nodes don't matter, as it is astext()-ed later nodelist = [nodes.Text(str(ast))] appendDeclName = False - except cpp.DefinitionError: # pyright: ignore + except cpp.DefinitionError: # pyright: ignore # happens with "typename ...Args", so for now, just append pass @@ -2521,7 +2682,7 @@ def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: """Parameter/Exception/TemplateParameter documentation""" # retval support available on Sphinx >= 4.3 - has_retval = sphinx.version_info[0:2] >= (4, 3) # pyright: ignore + 
has_retval = sphinx.version_info[0:2] >= (4, 3) # pyright: ignore fieldListName = { parser.DoxParamListKind.param: "param", parser.DoxParamListKind.exception: "throws", @@ -2560,7 +2721,8 @@ def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: dir = { parser.DoxParamDir.in_: "[in]", parser.DoxParamDir.out: "[out]", - parser.DoxParamDir.inout: "[inout]"}[paramName.direction] + parser.DoxParamDir.inout: "[inout]", + }[paramName.direction] parameterDirectionNodes = [nodes.strong(dir, dir), nodes.Text(" ")] # it seems that Sphinx expects the name to be a single node, # so let's make it that @@ -2585,10 +2747,10 @@ def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: def visit_docdot(self, node: parser.Node_docDotMscType) -> list[Node]: """Translate node from doxygen's dot command to sphinx's graphviz directive.""" graph_node = graphviz() - str_value = '' + str_value = "" if len(node): val = node[0] - assert isinstance(val,str) + assert isinstance(val, str) str_value = val if str_value.rstrip("\n"): graph_node["code"] = str_value @@ -2610,7 +2772,7 @@ def visit_docdot(self, node: parser.Node_docDotMscType) -> list[Node]: def visit_docdotfile(self, node: parser.Node_docImageFileType) -> list[Node]: """Translate node from doxygen's dotfile command to sphinx's graphviz directive.""" dotcode = "" - dot_file_path: str = node.name or '' + dot_file_path: str = node.name or "" # Doxygen v1.9.3+ uses a relative path to specify the dot file. # Previously, Doxygen used an absolute path. # This relative path is with respect to the XML_OUTPUT path. @@ -2639,7 +2801,7 @@ def visit_docdotfile(self, node: parser.Node_docImageFileType) -> list[Node]: graph_node = graphviz() graph_node["code"] = dotcode graph_node["options"] = {"docname": dot_file_path} - caption = '' if len(node) == 0 else parser.tag_name_value(node[0])[1] + caption = "" if len(node) == 0 else parser.tag_name_value(node[0])[1] if caption: caption_node = nodes.caption(caption, "") caption_node += nodes.Text(caption) @@ -2653,18 +2815,18 @@ def visit_docgraph(self, tag: str, node: parser.Node_graphType) -> list[Node]: assert self.context parent = self.context.node_stack[1].value - assert isinstance(parent,parser.Node_compounddefType) + assert isinstance(parent, parser.Node_compounddefType) direction = "forward" - if tag == 'incdepgraph': + if tag == "incdepgraph": caption = f"Include dependency graph for {parent.compoundname}:" - elif tag == 'invincdepgraph': - direction = 'back' + elif tag == "invincdepgraph": + direction = "back" caption = f"This graph shows which files directly or indirectly include {parent.compoundname}:" - elif tag == 'inheritancegraph': + elif tag == "inheritancegraph": caption = f"Inheritance diagram for {parent.compoundname}:" else: - assert tag == 'collaborationgraph' + assert tag == "collaborationgraph" caption = f"Collaboration diagram for {parent.compoundname}:" # use graphs' legend from doxygen (v1.9.1) @@ -2734,16 +2896,24 @@ def visit_unknown(self, node) -> list[Node]: @node_handler(parser.Node_CompoundType) def dispatch_compound(self, node: parser.Node_CompoundType) -> list[Node]: """Dispatch handling of a compound node to a suitable visit method.""" - if node.kind in [parser.CompoundKind.file, parser.CompoundKind.dir, parser.CompoundKind.page, parser.CompoundKind.example, parser.CompoundKind.group]: + if node.kind in [ + parser.CompoundKind.file, + parser.CompoundKind.dir, + parser.CompoundKind.page, + parser.CompoundKind.example, + parser.CompoundKind.group, 
+ ]: return self.visit_file(node) return self.visit_compound(node) @node_handler(parser.Node_memberdefType) def dispatch_memberdef(self, node: parser.Node_memberdefType) -> list[Node]: """Dispatch handling of a memberdef node to a suitable visit method.""" - if node.kind in (parser.DoxMemberKind.function, parser.DoxMemberKind.signal, parser.DoxMemberKind.slot) or ( - node.kind == parser.DoxMemberKind.friend and node.argsstring - ): + if node.kind in ( + parser.DoxMemberKind.function, + parser.DoxMemberKind.signal, + parser.DoxMemberKind.slot, + ) or (node.kind == parser.DoxMemberKind.friend and node.argsstring): return self.visit_function(node) if node.kind == parser.DoxMemberKind.enum: return self.visit_enum(node) @@ -2766,7 +2936,7 @@ def dispatch_memberdef(self, node: parser.Node_memberdefType) -> list[Node]: @tagged_node_handler(str) def visit_string(self, tag: str, node: str) -> list[Node]: - if tag == 'verbatim': + if tag == "verbatim": return self.visit_verbatim(node) return self.render_string(node) @@ -2799,21 +2969,25 @@ def render_string(self, node: str) -> list[Node]: if node == " ": return [nodes.Text(node)] return [] - - def render_tagged(self, item: parser.TaggedValue[str,parser.NodeOrValue] | str) -> list[Node]: - if isinstance(item,str): return self.render_string(item) + + def render_tagged(self, item: parser.TaggedValue[str, parser.NodeOrValue] | str) -> list[Node]: + if isinstance(item, str): + return self.render_string(item) h = self.tagged_node_handlers.get(type(item.value)) if h is not None: assert self.context is not None with WithContext(self, self.context.create_child_context(item.value, item.name)): - if not self.filter_(NodeStack(self.context.node_stack)): return [] - return h(self,item.name, item.value) + if not self.filter_(NodeStack(self.context.node_stack)): + return [] + return h(self, item.name, item.value) return self.render(item.value) - def render(self, node: parser.NodeOrValue, context: RenderContext | None = None, tag: str | None = None) -> list[Node]: + def render( + self, node: parser.NodeOrValue, context: RenderContext | None = None, tag: str | None = None + ) -> list[Node]: if context is None: assert self.context is not None - context = self.context.create_child_context(node,tag) + context = self.context.create_child_context(node, tag) with WithContext(self, context): assert self.context is not None result: list[Node] = [] diff --git a/examples/specific/cpp_function.h b/examples/specific/cpp_function.h index a09c92e4..2accc5e5 100644 --- a/examples/specific/cpp_function.h +++ b/examples/specific/cpp_function.h @@ -8,6 +8,8 @@ struct Class { void (*f_issue_489)(struct Foo *foo, int value); int f_issue_338() noexcept; + + int anon_params(int, int, int x, char*); }; /** A namespace to demonstrate a namespaced function */ diff --git a/examples/specific/group_content_only.hpp b/examples/specific/group_content_only.hpp new file mode 100644 index 00000000..7a35eb44 --- /dev/null +++ b/examples/specific/group_content_only.hpp @@ -0,0 +1,17 @@ + +/// @defgroup structy_group StructyGroup +/// @{ + + +/// Hello +typedef struct { + const unsigned char* data_1; + unsigned int size_1; + const unsigned char* data_2; + unsigned int size_2; +} Structy; + + +/// @} + + diff --git a/examples/specific/inheritance.h b/examples/specific/inheritance.h index 4ef539d8..391e83a0 100644 --- a/examples/specific/inheritance.h +++ b/examples/specific/inheritance.h @@ -7,3 +7,7 @@ class Main : public BaseA, BaseB {}; class ChildA : public Main {}; class ChildB : public Main {}; + 
+class ChildV1 : virtual public BaseA {}; +class ChildV2 : virtual public BaseA {}; +class ChildV3 : public ChildV1, ChildV2 {}; diff --git a/mkrelease b/mkrelease index 545a75fa..767c5642 100755 --- a/mkrelease +++ b/mkrelease @@ -40,7 +40,7 @@ pack() tar -xf "breathe-$version.tar.gz" cd -- "breathe-$version" - python3 setup.py sdist bdist_wheel + python3 -m build mv -- dist .. cd -- .. diff --git a/tests/data/examples/test_cpp_function/compare.xml b/tests/data/examples/test_cpp_function/compare.xml new file mode 100644 index 00000000..38cccec1 --- /dev/null +++ b/tests/data/examples/test_cpp_function/compare.xml @@ -0,0 +1,78 @@ + + + + + + struct Foo + + + + + + struct Class + + + Public Functions + + + virtual void f1 volatile const & = 0 + + + + + + virtual void f2 volatile const && = 0 + + + + + + int f_issue_338 noexcept + + + + + + int anon_paramsintintint xchar* + + + + + + Public Members + + + void (*f_issue_489)(struct Foo *foo, int value) + + + + + + Public Static Functions + + + static void f3 + + + + + + + + + namespace TestNamespaceFunction + + A namespace to demonstrate a namespaced function. + + Functions + + + void namespaceFunc + + A function within a namspace. + + + + + + diff --git a/tests/data/examples/test_cpp_function/input.rst b/tests/data/examples/test_cpp_function/input.rst new file mode 100644 index 00000000..ba4a2135 --- /dev/null +++ b/tests/data/examples/test_cpp_function/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: cpp_function.h diff --git a/tests/data/examples/test_group_content_only/compare.xml b/tests/data/examples/test_group_content_only/compare.xml new file mode 100644 index 00000000..25d27d92 --- /dev/null +++ b/tests/data/examples/test_group_content_only/compare.xml @@ -0,0 +1,11 @@ + + + + + + struct Structy + + Hello. + + + diff --git a/tests/data/examples/test_group_content_only/input.rst b/tests/data/examples/test_group_content_only/input.rst new file mode 100644 index 00000000..2d1eb0bc --- /dev/null +++ b/tests/data/examples/test_group_content_only/input.rst @@ -0,0 +1,2 @@ +.. doxygengroup:: structy_group + :content-only: diff --git a/tests/data/examples/test_inheritance/compare.xml b/tests/data/examples/test_inheritance/compare.xml new file mode 100644 index 00000000..c9480e0e --- /dev/null +++ b/tests/data/examples/test_inheritance/compare.xml @@ -0,0 +1,58 @@ + + + + + + class BaseA + + Subclassed by ChildV1, ChildV2, Main + + + + + class BaseB + + Subclassed by Main + + + + + class Main : public BaseA, private BaseB + + This is the main class we’re interested in. + Subclassed by ChildA, ChildB + + + + + class ChildA : public Main + + + + + + class ChildB : public Main + + + + + + class ChildV1 : public virtual BaseA + + Subclassed by ChildV3 + + + + + class ChildV2 : public virtual BaseA + + Subclassed by ChildV3 + + + + + class ChildV3 : public ChildV1, private ChildV2 + + + + diff --git a/tests/data/examples/test_inheritance/input.rst b/tests/data/examples/test_inheritance/input.rst new file mode 100644 index 00000000..95016b22 --- /dev/null +++ b/tests/data/examples/test_inheritance/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: inheritance.h diff --git a/tests/data/examples/test_inline/compare.xml b/tests/data/examples/test_inline/compare.xml new file mode 100644 index 00000000..2faf049d --- /dev/null +++ b/tests/data/examples/test_inline/compare.xml @@ -0,0 +1,49 @@ + + + + + + class InlineTest + + A class to demonstrate inline documentation syntax. + + Public Functions + + + const char *memberchar cint n + + A member function. 
+ Details about member function + + + Parameters + + + + c – c a character. + + + n – n an integer. + + + + + + Throws + + std::out_of_range – parameter is out of range. + + + + Returns + + a character pointer. + + + + + + + + + diff --git a/tests/data/examples/test_inline/input.rst b/tests/data/examples/test_inline/input.rst new file mode 100644 index 00000000..18296c17 --- /dev/null +++ b/tests/data/examples/test_inline/input.rst @@ -0,0 +1,2 @@ +.. doxygenclass:: InlineTest + :members: diff --git a/tests/data/examples/test_latexmath/compare.xml b/tests/data/examples/test_latexmath/compare.xml new file mode 100644 index 00000000..21cc94ac --- /dev/null +++ b/tests/data/examples/test_latexmath/compare.xml @@ -0,0 +1,15 @@ + + + + + + class MathHelper + + A class. + A inline formula: f(x) = a + b + A display style formula: +\int_a^b f(x) dx = F(b) - F(a) + + + + diff --git a/tests/data/examples/test_latexmath/input.rst b/tests/data/examples/test_latexmath/input.rst new file mode 100644 index 00000000..686fc066 --- /dev/null +++ b/tests/data/examples/test_latexmath/input.rst @@ -0,0 +1 @@ +.. doxygenclass:: MathHelper diff --git a/tests/data/examples/test_links/compare.xml b/tests/data/examples/test_links/compare.xml new file mode 100644 index 00000000..32ddb67f --- /dev/null +++ b/tests/data/examples/test_links/compare.xml @@ -0,0 +1,12 @@ + + + + + + class LinksTest + + first struct inside of namespace + This is a longer description with a link to a webpage in the text http://www.github.com in order to test out Breathe’s handling of links. + + + diff --git a/tests/data/examples/test_links/input.rst b/tests/data/examples/test_links/input.rst new file mode 100644 index 00000000..70aca7b6 --- /dev/null +++ b/tests/data/examples/test_links/input.rst @@ -0,0 +1 @@ +.. doxygenclass:: LinksTest diff --git a/tests/data/examples/test_lists/compare.xml b/tests/data/examples/test_lists/compare.xml new file mode 100644 index 00000000..e45d6f29 --- /dev/null +++ b/tests/data/examples/test_lists/compare.xml @@ -0,0 +1,103 @@ + + + + + + class SimpleList_1 + + This is a list example. + Following is a list using ‘+’ for bullets:One item.Two items.Three items.Four. + And this is some more text. + + + + + class SimpleList_2 + + This is a list example. + Following is a list using ‘-’ for bullets:One item.Two items.Three items.Four. + And this is some more text. + + + + + class SimpleList_3 + + This is a list example. + Following is a list using ‘*’ for bullets:One item.Two items.Three items.Four. + And this is some more text. + + + + + class SimpleList_4 + + This is a list example. + Following is an auto-numbered list:One item.Two items.Three items.Four. + And this is some more text. + + + + + class SimpleList_5 + + This is a list example. + Following is a numbered list:One item.Two items.Three items.Four. + And this is some more text. + + + + + class SimpleList_6 + + This is a list example. + Following is an unordered list using ‘HTML’ tags: One item. Two items. Three items. Four. + And this is some more text. + + + + + class NestedLists_1 + + A list of events: + mouse eventsmouse move eventmouse click eventMore info about the click event.mouse double click eventkeyboard eventskey down eventkey up event + More text here. + + + + + class NestedLists_2 + + Text before the list. 
+ list item 1sub item 1sub sub item 1sub sub item 2 + The dot above ends the sub sub item list.More text for the first sub item + The dot above ends the first sub item.More text for the first list itemsub item 2sub item 3list item 2 + More text in the same paragraph. + More text in a new paragraph. + + + + + class NestedLists_3 + + A list of events: mouse events mouse move event mouse click eventMore info about the click event.mouse double click event keyboard events key down event key up event + More text here. + + + + + class NestedLists_4 + + A list of events: + mouse eventsmouse move eventswipe eventcircle eventwave eventmouse click eventMore info about the click event.mouse double click eventkeyboard eventskey down eventkey up eventtouch eventspinch eventswipe event More text here. + + + + + class NestedLists_5 + + A deeply nested list of events: + mouse eventsmouse move eventswipe eventswipe leftswipe rightcircle eventwave eventmouse click eventMore info about the click event.mouse double click eventkeyboard eventskey down eventkey up eventtouch eventspinch eventswipe event More text here. + + + diff --git a/tests/data/examples/test_lists/input.rst b/tests/data/examples/test_lists/input.rst new file mode 100644 index 00000000..1449fe78 --- /dev/null +++ b/tests/data/examples/test_lists/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: lists.h diff --git a/tests/data/examples/test_membergroups/compare.xml b/tests/data/examples/test_membergroups/compare.xml new file mode 100644 index 00000000..9bf2285e --- /dev/null +++ b/tests/data/examples/test_membergroups/compare.xml @@ -0,0 +1,38 @@ + + + + + + class GroupedMembers + + demonstrates member groups + + myGroup + + + void in_mygroup_oneint myParameter + + A function. + + + + + void in_mygroup_twoint myParameter + + Another function. + + + + + Public Functions + + + void not_in_mygroupint myParameter + + This one is not in myGroup. + + + + + + diff --git a/tests/data/examples/test_membergroups/input.rst b/tests/data/examples/test_membergroups/input.rst new file mode 100644 index 00000000..d9c28137 --- /dev/null +++ b/tests/data/examples/test_membergroups/input.rst @@ -0,0 +1,2 @@ +.. doxygenclass:: GroupedMembers + :members: diff --git a/tests/data/examples/test_qtsignalsandslots/compare.xml b/tests/data/examples/test_qtsignalsandslots/compare.xml new file mode 100644 index 00000000..7b9bcdef --- /dev/null +++ b/tests/data/examples/test_qtsignalsandslots/compare.xml @@ -0,0 +1,61 @@ + + + + + + class QtSignalSlotExample : public QObject + + + Public Functions + + + inline void workingFunctionint iShownParameter + + + + Parameters + + iShownParameter – This is shown in declaration + + + + + + + + Public Slots + + + inline void workingSlotint iShown + + + + Parameters + + iShown – This is in function declaration + + + + + + + + Signals + + + void workingSignalint iShown + + + + Parameters + + iShown – This is in function declaration + + + + + + + + + diff --git a/tests/data/examples/test_qtsignalsandslots/input.rst b/tests/data/examples/test_qtsignalsandslots/input.rst new file mode 100644 index 00000000..0872b435 --- /dev/null +++ b/tests/data/examples/test_qtsignalsandslots/input.rst @@ -0,0 +1 @@ +.. 
doxygenfile:: qtsignalsandslots.h diff --git a/tests/data/examples/test_simplesect/compare.xml b/tests/data/examples/test_simplesect/compare.xml new file mode 100644 index 00000000..139e4d8e --- /dev/null +++ b/tests/data/examples/test_simplesect/compare.xml @@ -0,0 +1,49 @@ + + + + + + template<typename T1, typename T2>void fint afloat bstd::string c + + see, f_raw sa, f_raw Remarkremark, 1 Remarkremark, 2 Remarkremarks, 1 Remarkremarks, 2 par, something + + note, be careful + + + warning, don’t do this + + + + Pre + + stuff must be correct + + + + Pre + + more stuff must be correct + + + + Post + + stuff will be nice + + + + Post + + more stuff will be nice + + + + Returns + + nothing + + + + + + diff --git a/tests/data/examples/test_simplesect/input.rst b/tests/data/examples/test_simplesect/input.rst new file mode 100644 index 00000000..31cf7aaa --- /dev/null +++ b/tests/data/examples/test_simplesect/input.rst @@ -0,0 +1 @@ +.. doxygenfunction:: f diff --git a/tests/data/examples/test_tables/compare.xml b/tests/data/examples/test_tables/compare.xml new file mode 100644 index 00000000..767fbc86 --- /dev/null +++ b/tests/data/examples/test_tables/compare.xml @@ -0,0 +1,54 @@ + + + + + + class Table_1 + + This is a simple Markdown table example. + Following is a simple table using Markdown syntax. + First Header Second Header Content Cell Content Cell Content Cell Content Cell
+ And this is some more text. +
+
+ + + class Table_2 + + This is a Markdown table with alignment. + Following is a table with alignment using Markdown syntax. + Right Center Left 10 10 10 1000 1000 1000
+ And this is some more text. +
+
+ + + class Table_3 + + This is a Markdown table with rowspan and alignment. + Following is a table with rowspan and alignment using Markdown syntax. + Right Center Left 10 10 10 1000 1000
+ And this is some more text. +
+
+ + + class Table_4 + + This is a Markdown table with colspan and alignment. + Following is a table with colspan and alignment using Markdown syntax. + Right Center Left 10 10 10 1000
+ And this is some more text. +
+
+ + + class Table_5 + + This is a Doxygen table. + Following is a table using Doxygen syntax (and all supported features). + Column 1 Column 2 Column 3 cell row=1+2,col=1cell row=1,col=2cell row=1,col=3 cell row=2+3,col=2 cell row=2,col=3 cell row=3,col=1 cell row=3+4,col=3 cell row=4,col=1+2 cell row=5,col=1 cell row=5,col=2+3 cell row=6+7,col=1+2 cell row=6,col=3 cell row=7,col=3 cell row=8,col=1 cell row=8,col=2
Inner cell row=1,col=1Inner cell row=1,col=2 Inner cell row=2,col=1Inner cell row=2,col=2
cell row=8,col=3 Item 1 Item 2 + And this is some more text. +
+
+
diff --git a/tests/data/examples/test_tables/input.rst b/tests/data/examples/test_tables/input.rst new file mode 100644 index 00000000..56e6e97e --- /dev/null +++ b/tests/data/examples/test_tables/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: tables.h diff --git a/tests/data/examples/test_template_function/compare.xml b/tests/data/examples/test_template_function/compare.xml new file mode 100644 index 00000000..af01fabc --- /dev/null +++ b/tests/data/examples/test_template_function/compare.xml @@ -0,0 +1,99 @@ + + + + + Functions + + + template<typename T>T function1T arg1 + + a function with one template arguments + + + Template Parameters + + T – this is the template parameter + + + + Parameters + + arg1 – argument of type T + + + + Returns + + return value of type T + + + + + + + + template<>std::string function1<std::string>std::string arg1 + + a function with one template argument specialized for std::string + + + Parameters + + arg1 – argument of type std::string + + + + Returns + + return value of type std::string + + + + + + + + template<typename T, typename U, int N>T function2T arg1U arg2 + + a function with three template arguments + + + Template Parameters + + + + T – this is the first template parameter + + + U – this is the second template parameter + + + N – this is the third template parameter, it is a non-type parameter + + + + + + Parameters + + + + arg1 – first argument of type T + + + arg2 – second argument of type U + + + + + + Returns + + return value of type T + + + + + + + diff --git a/tests/data/examples/test_template_function/input.rst b/tests/data/examples/test_template_function/input.rst new file mode 100644 index 00000000..e11f51e6 --- /dev/null +++ b/tests/data/examples/test_template_function/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: template_function.h diff --git a/tests/data/examples/test_template_type_alias/compare.xml b/tests/data/examples/test_template_type_alias/compare.xml new file mode 100644 index 00000000..4623cb2c --- /dev/null +++ b/tests/data/examples/test_template_type_alias/compare.xml @@ -0,0 +1,47 @@ + + + + + Typedefs + + + template<typename T>using IsFuzzy = std::is_fuzzy<T> + + a type alias with one template argument + + + Template Parameters + + T – this is the template parameter + + + + + + + + template<typename T, typename U, int N>using IsFurry = std::is_furry<T, U, N> + + a type alias with three template arguments + + + Template Parameters + + + + T – this is the first template parameter + + + U – this is the second template parameter + + + N – this is the third template parameter, it is a non-type parameter + + + + + + + + + diff --git a/tests/data/examples/test_template_type_alias/input.rst b/tests/data/examples/test_template_type_alias/input.rst new file mode 100644 index 00000000..3bdd3944 --- /dev/null +++ b/tests/data/examples/test_template_type_alias/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: template_type_alias.h diff --git a/tests/data/examples/test_union/compare.xml b/tests/data/examples/test_union/compare.xml new file mode 100644 index 00000000..fad965fc --- /dev/null +++ b/tests/data/examples/test_union/compare.xml @@ -0,0 +1,111 @@ + + + + + + union SeparateUnion + + A union of two values. + + Public Members + + + int size + + The size of the thing. + + + + + float depth + + How deep it is. + + + + + + + + class ClassWithUnion + + A class with a union. + + + class ExtraClass + + Documented class. 
+ + Private Members + + + int a_member + + + + + + float another_member + + + + + + + + + union UnionInClass + + A union with two values. + + Public Members + + + int intvalue + + An int value. + + + + + float floatvalue + + A float value. + + + + + + + + + + namespace foo + + + + union MyUnion + + A union of two values. + + Public Members + + + int someInt + + The int of it all. + + + + + float someFloat + + The float side of things. + + + + + + + + diff --git a/tests/data/examples/test_union/input.rst b/tests/data/examples/test_union/input.rst new file mode 100644 index 00000000..87bd2c73 --- /dev/null +++ b/tests/data/examples/test_union/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: union.h diff --git a/tests/data/examples/test_userdefined/compare.xml b/tests/data/examples/test_userdefined/compare.xml new file mode 100644 index 00000000..63135fa5 --- /dev/null +++ b/tests/data/examples/test_userdefined/compare.xml @@ -0,0 +1,51 @@ + + + + + + class UserDefinedGroupTest + + A class. + More details about the UserDefinedGroupTest class + + Custom Group + Description of custom group + + + void func1InCustomGroup + + Function 1 in custom group. + Details. + + + + + void func2InCustomGroup + + Function 2 in custom group. + Details. + + + + + Public Functions + + + void func1InGroup1 + + Same documentation for both members. + Details + + + + + void ungroupedFunction + + Function without group. + Details. + + + + + + diff --git a/tests/data/examples/test_userdefined/input.rst b/tests/data/examples/test_userdefined/input.rst new file mode 100644 index 00000000..337804a4 --- /dev/null +++ b/tests/data/examples/test_userdefined/input.rst @@ -0,0 +1,3 @@ +.. doxygenclass:: UserDefinedGroupTest + :members: + :protected-members: diff --git a/tests/test_examples.py b/tests/test_examples.py index 8cf7686b..10fd00a0 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -23,12 +23,13 @@ JAVADOC_AUTOBRIEF = YES GENERATE_HTML = NO GENERATE_XML = YES +WARN_IF_UNDOCUMENTED = NO ALIASES = "rst=\\verbatim embed:rst" ALIASES += "endrst=\\endverbatim" ALIASES += "inlinerst=\\verbatim embed:rst:inline" """ -C_FILE_SUFFIXES = frozenset(('.h', '.c', '.cpp')) +C_FILE_SUFFIXES = frozenset(('.h', '.c', '.hpp', '.cpp')) IGNORED_ELEMENTS = frozenset(()) BUFFER_SIZE = 0x1000 @@ -46,6 +47,11 @@ class XMLElement: line_no: int column_no: int +@dataclasses.dataclass +class XMLElementEnd: + line_no: int + column_no: int + @dataclasses.dataclass class XMLTextElement: value: str @@ -89,7 +95,11 @@ def handle_start(name,attr): def handle_end(_): dispatch_text() - pending_events.append((XMLEventType.E_END,None)) + pending_events.append(( + XMLEventType.E_END, + XMLElementEnd( + p.CurrentLineNumber, + p.CurrentColumnNumber))) p.EndElementHandler = handle_end def handle_text(data): @@ -193,4 +203,4 @@ def test_example(make_app, tmp_path, test_input): o_value = o_node.attr[key] assert o_value == value, f'wrong value for attribute "{key}" at line {o_node.line_no}: expected "{value}", found "{o_value}"' elif o_type == XMLEventType.E_TEXT: - assert o_node.value == c_node.value, f'wrong content at line {o_node.line_no}: expected "{c_node}", found "{o_node}"' + assert o_node.value == c_node.value, f'wrong content at line {o_node.line_no}: expected "{c_node.value}", found "{o_node.value}"' diff --git a/xml_parser_generator/module_template.c b/xml_parser_generator/module_template.c index 276a5b91..3d8f2e36 100644 --- a/xml_parser_generator/module_template.c +++ b/xml_parser_generator/module_template.c @@ -39,6 +39,17 @@ is broken 
into chunks. */ #define Py_TPFLAGS_SEQUENCE 0 #endif +#if PY_VERSION_HEX >= 0x030900f0 +#define COMPAT_Py_GenericAlias Py_GenericAlias +#else +/* Before Python 3.9, there was no types.GenericAlias class, so just return the +class unchanged */ +static PyObject *COMPAT_Py_GenericAlias(PyObject *cls, PyObject *Py_UNUSED(val)) { + Py_INCREF(cls); + return cls; +} +#endif + enum { CLASS_NODE = 0, /* important: module_exec() assumes this comes before CLASS_FROZEN_LIST */ @@ -363,6 +374,11 @@ static PyObject *tagged_value_tp_new(PyTypeObject *subtype,PyObject *args,PyObje return (PyObject*)r; } +static PyMethodDef tagged_value_methods[] = { + {"__class_getitem__", COMPAT_Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")}, + {NULL} +}; + static PyType_Slot tagged_value_slots[] = { {Py_tp_new,tagged_value_tp_new}, {Py_tp_members,tagged_value_members}, @@ -370,6 +386,7 @@ static PyType_Slot tagged_value_slots[] = { {Py_sq_length,tagged_value_size}, {Py_sq_item,tagged_value_item}, {Py_tp_traverse,tagged_value_traverse}, + {Py_tp_methods,tagged_value_methods}, {0,NULL}}; @@ -527,6 +544,12 @@ static PyObject *frozen_list_tp_iter(frozen_list *self) { return (PyObject*)r; } +static PyMethodDef frozen_list_methods[] = { + {"__class_getitem__", COMPAT_Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")}, + {NULL} +}; + + static PyType_Slot frozen_list_slots[] = { {Py_tp_iter,frozen_list_tp_iter}, {Py_tp_new,frozen_list_tp_new}, @@ -534,6 +557,7 @@ static PyType_Slot frozen_list_slots[] = { {Py_sq_length,frozen_list_size}, {Py_sq_item,frozen_list_item}, {Py_tp_traverse,frozen_list_traverse}, + {Py_tp_methods,frozen_list_methods}, {0,NULL} }; @@ -561,6 +585,7 @@ static PyObject *frozen_list_itr_length_hint(frozen_list_itr *self,PyObject *Py_ static PyMethodDef frozen_list_itr_methods[] = { {"__length_hint__",(PyCFunction)frozen_list_itr_length_hint,METH_NOARGS,NULL}, + {"__class_getitem__", COMPAT_Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")}, {NULL} }; @@ -2153,10 +2178,11 @@ static int module_exec(PyObject *module) { frozen_list_bases = PyTuple_New(2); if(frozen_list_bases == NULL) goto error; - PyTuple_SetItem(frozen_list_bases,0,(PyObject*)state->classes[i]); - Py_INCREF(state->classes[i]); - PyTuple_SetItem(frozen_list_bases,1,(PyObject*)state->classes[CLASS_NODE]); + + PyTuple_SetItem(frozen_list_bases,0,(PyObject*)state->classes[CLASS_NODE]); Py_INCREF(state->classes[CLASS_NODE]); + PyTuple_SetItem(frozen_list_bases,1,(PyObject*)state->classes[i]); + Py_INCREF(state->classes[i]); node_bases = PyTuple_New(1); if(node_bases == NULL) goto error; diff --git a/xml_parser_generator/stubs_template.pyi b/xml_parser_generator/stubs_template.pyi index c101eff6..1f67d4f7 100644 --- a/xml_parser_generator/stubs_template.pyi +++ b/xml_parser_generator/stubs_template.pyi @@ -19,8 +19,11 @@ class FrozenList(Generic[T]): def __iter__(self) -> FrozenListItr[T]: ... class TaggedValue(Generic[T, U]): - name: T - value: U + @property + def name(self) -> T: ... + + @property + def value(self) -> U: ... def __init__(self, name: T, value: U): ... 
@@ -109,7 +112,7 @@ ListItem_{$ type $} = ( {$ "invalid content type"|error $} //% endif //% if type is used_directly -class Node_{$ type $}({$ 'FrozenList['~list_item_type~'], ' if type is list_e $}Node): +class Node_{$ type $}(Node{$ ', FrozenList['~list_item_type~']' if type is list_e $}): {$ emit_fields(type) $} def __init__(self{$ ', __items: Iterable['~list_item_type~'], /' if type is list_e $} {%- if type|field_count -%}, * From 918525e104f61850e207b9594a9fee7a44ac0f16 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sun, 10 Dec 2023 20:33:48 -0500 Subject: [PATCH 28/65] Another little fix --- breathe/renderer/sphinxrenderer.py | 4 ++-- xml_parser_generator/module_template.c | 13 +++++++++---- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index ed8fd96d..c4999374 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -2122,7 +2122,7 @@ def visit_inc(self, node: parser.Node_incType) -> list[Node]: compound_link: list[Node] = [nodes.Text("".join(node))] if node.refid: compound_link = self.visit_docreftext(node) - if node.local == "yes": + if node.local: text = [nodes.Text('#include "'), *compound_link, nodes.Text('"')] else: text = [nodes.Text("#include <"), *compound_link, nodes.Text(">")] @@ -2258,7 +2258,7 @@ def visit_docanchor(self, node: parser.Node_docAnchorType) -> list[Node]: def visit_docentry(self, node: parser.Node_docEntryType) -> list[Node]: col = nodes.entry() col += self.render_iterable(node.para) - if node.thead == "yes": + if node.thead: col["heading"] = True if node.rowspan: col["morerows"] = int(node.rowspan) - 1 diff --git a/xml_parser_generator/module_template.c b/xml_parser_generator/module_template.c index 3d8f2e36..0e24d605 100644 --- a/xml_parser_generator/module_template.c +++ b/xml_parser_generator/module_template.c @@ -1,9 +1,14 @@ #define PY_SSIZE_T_CLEAN #include +#ifdef PARSER_PY_LIMITED_API /* Py_LIMITED_API isn't compatible with Py_TRACE_REFS */ -#if !defined(Py_TRACE_REFS) && defined(PARSER_PY_LIMITED_API) -#define Py_LIMITED_API PARSER_PY_LIMITED_API +# if !defined(Py_TRACE_REFS) +# define Py_LIMITED_API PARSER_PY_LIMITED_API +# endif +# define PARSER_PY_VERSION_HEX PARSER_PY_LIMITED_API +#else +# define PARSER_PY_VERSION_HEX PY_VERSION_HEX #endif #include @@ -39,7 +44,7 @@ is broken into chunks. */ #define Py_TPFLAGS_SEQUENCE 0 #endif -#if PY_VERSION_HEX >= 0x030900f0 +#if PARSER_PY_VERSION_HEX >= 0x03090000 #define COMPAT_Py_GenericAlias Py_GenericAlias #else /* Before Python 3.9, there was no types.GenericAlias class, so just return the @@ -2296,7 +2301,7 @@ initialization is used, so for now, single-phase initialization is used. 
static PyModuleDef_Slot m_slots[] = { {Py_mod_exec,module_exec}, -#if PY_VERSION_HEX >= 0x030c00f0 +#if PARSER_PY_VERSION_HEX >= 0x030c00f0 {Py_mod_multiple_interpreters,Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, #endif {0,NULL}}; From ec4010e886da2b315fd0e4ad41672e42b3e5bba2 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Mon, 11 Dec 2023 23:18:10 -0500 Subject: [PATCH 29/65] Uncoupled test and example code and more tests --- breathe/parser.py | 6 +- examples/specific/cpp_function.h | 2 - examples/specific/inheritance.h | 4 - tests/data/auto/auto_class.h | 12 + tests/data/auto/auto_function.h | 6 + tests/data/auto/compare.xml | 45 +++ tests/data/auto/input.rst | 2 + tests/data/examples/test_alias/alias.h | 15 + tests/data/examples/test_array/array.h | 13 + tests/data/examples/test_c_enum/c_enum.h | 63 ++++ tests/data/examples/test_c_file/c_file.h | 53 +++ tests/data/examples/test_class/class.cpp | 12 + tests/data/examples/test_class/class.h | 137 +++++++ .../examples/test_code_blocks/code_blocks.h | 36 ++ .../examples/test_cpp_concept/cpp_concept.h | 5 + tests/data/examples/test_cpp_enum/cpp_enum.h | 15 + .../test_cpp_friendclass/cpp_friendclass.h | 7 + .../examples/test_cpp_function/cpp_function.h | 19 + .../cpp_inherited_members.h | 16 + .../cpp_trailing_return_type.h | 7 + tests/data/examples/test_define/define.h | 31 ++ .../data/examples/test_dot_graphs/compare.xml | 3 +- .../examples/test_dot_graphs/dot_graphs.h | 22 ++ .../data/examples/test_dot_graphs/dotfile.dot | 47 +++ tests/data/examples/test_group/group.h | 90 +++++ .../group_content_only.hpp | 0 tests/data/examples/test_headings/headings.h | 18 + tests/data/examples/test_image/compare.xml | 14 + tests/data/examples/test_image/image.h | 9 + tests/data/examples/test_image/input.rst | 1 + tests/data/examples/test_image/pixel.png | Bin 0 -> 70 bytes tests/data/examples/test_index/compare.xml | 341 ++++++++++++++++++ tests/data/examples/test_index/index.h | 43 +++ tests/data/examples/test_index/input.rst | 2 + .../examples/test_inheritance/inheritance.h | 13 + tests/data/examples/test_inline/inline.h | 17 + .../data/examples/test_latexmath/latexmath.h | 18 + tests/data/examples/test_links/links.h | 9 + tests/data/examples/test_lists/lists.h | 201 +++++++++++ .../examples/test_membergroups/membergroups.h | 13 + .../examples/test_param_dirs}/param_dirs.h | 0 .../qtsignalsandslots.h | 40 ++ tests/data/examples/test_rst/rst.h | 77 ++++ .../examples/test_simplesect/simplesect.h | 19 + tests/data/examples/test_tables/tables.h | 96 +++++ .../template_class_non_type.h | 38 ++ .../test_template_function/compare.xml | 7 + .../template_function.h | 52 +++ .../template_type_alias.h | 20 + tests/data/examples/test_union/compare.xml | 53 --- tests/data/examples/test_union/input.rst | 3 +- tests/data/examples/test_union/union.h | 38 ++ .../examples/test_userdefined/userdefined.h | 31 ++ tests/data/examples/test_xrefsect/compare.xml | 41 +++ .../examples/test_xrefsect/extra_dox_opts.txt | 1 + tests/data/examples/test_xrefsect/input.rst | 1 + tests/data/examples/test_xrefsect/xrefsect.h | 34 ++ tests/test_examples.py | 100 ++--- tests/test_parser.py | 67 ++++ 59 files changed, 1976 insertions(+), 109 deletions(-) create mode 100644 tests/data/auto/auto_class.h create mode 100644 tests/data/auto/auto_function.h create mode 100644 tests/data/auto/compare.xml create mode 100644 tests/data/auto/input.rst create mode 100644 tests/data/examples/test_alias/alias.h create mode 100644 tests/data/examples/test_array/array.h create mode 100644 
tests/data/examples/test_c_enum/c_enum.h create mode 100644 tests/data/examples/test_c_file/c_file.h create mode 100644 tests/data/examples/test_class/class.cpp create mode 100644 tests/data/examples/test_class/class.h create mode 100644 tests/data/examples/test_code_blocks/code_blocks.h create mode 100644 tests/data/examples/test_cpp_concept/cpp_concept.h create mode 100644 tests/data/examples/test_cpp_enum/cpp_enum.h create mode 100644 tests/data/examples/test_cpp_friendclass/cpp_friendclass.h create mode 100644 tests/data/examples/test_cpp_function/cpp_function.h create mode 100644 tests/data/examples/test_cpp_inherited_members/cpp_inherited_members.h create mode 100644 tests/data/examples/test_cpp_trailing_return_type/cpp_trailing_return_type.h create mode 100644 tests/data/examples/test_define/define.h create mode 100644 tests/data/examples/test_dot_graphs/dot_graphs.h create mode 100644 tests/data/examples/test_dot_graphs/dotfile.dot create mode 100644 tests/data/examples/test_group/group.h rename {examples/specific => tests/data/examples/test_group_content_only}/group_content_only.hpp (100%) create mode 100644 tests/data/examples/test_headings/headings.h create mode 100644 tests/data/examples/test_image/compare.xml create mode 100644 tests/data/examples/test_image/image.h create mode 100644 tests/data/examples/test_image/input.rst create mode 100644 tests/data/examples/test_image/pixel.png create mode 100644 tests/data/examples/test_index/compare.xml create mode 100644 tests/data/examples/test_index/index.h create mode 100644 tests/data/examples/test_index/input.rst create mode 100644 tests/data/examples/test_inheritance/inheritance.h create mode 100644 tests/data/examples/test_inline/inline.h create mode 100644 tests/data/examples/test_latexmath/latexmath.h create mode 100644 tests/data/examples/test_links/links.h create mode 100644 tests/data/examples/test_lists/lists.h create mode 100644 tests/data/examples/test_membergroups/membergroups.h rename {examples/specific => tests/data/examples/test_param_dirs}/param_dirs.h (100%) create mode 100644 tests/data/examples/test_qtsignalsandslots/qtsignalsandslots.h create mode 100644 tests/data/examples/test_rst/rst.h create mode 100644 tests/data/examples/test_simplesect/simplesect.h create mode 100644 tests/data/examples/test_tables/tables.h create mode 100644 tests/data/examples/test_template_class_non_type/template_class_non_type.h create mode 100644 tests/data/examples/test_template_function/template_function.h create mode 100644 tests/data/examples/test_template_type_alias/template_type_alias.h create mode 100644 tests/data/examples/test_union/union.h create mode 100644 tests/data/examples/test_userdefined/userdefined.h create mode 100644 tests/data/examples/test_xrefsect/compare.xml create mode 100644 tests/data/examples/test_xrefsect/extra_dox_opts.txt create mode 100644 tests/data/examples/test_xrefsect/input.rst create mode 100644 tests/data/examples/test_xrefsect/xrefsect.h create mode 100644 tests/test_parser.py diff --git a/breathe/parser.py b/breathe/parser.py index 483783b1..5609f5aa 100644 --- a/breathe/parser.py +++ b/breathe/parser.py @@ -14,7 +14,7 @@ NodeOrValue = Node | str | None @reprlib.recursive_repr() -def node_repr(self: Node) -> str: +def node_repr(self: Node) -> str: # pragma: no cover cls = type(self) fields = [] if isinstance(self,FrozenList): @@ -26,12 +26,12 @@ def node_repr(self: Node) -> str: Node.__repr__ = node_repr # type: ignore @reprlib.recursive_repr() -def taggedvalue_repr(self: TaggedValue) -> str: 
+def taggedvalue_repr(self: TaggedValue) -> str: # pragma: no cover return f'{self.__class__.__name__}({self.name!r}, {self.value!r})' TaggedValue.__repr__ = taggedvalue_repr # type: ignore @reprlib.recursive_repr() -def frozenlist_repr(self: FrozenList) -> str: +def frozenlist_repr(self: FrozenList) -> str: # pragma: no cover inner = ', '.join(map(repr,self)) return f'{self.__class__.__name__}([{inner}])' FrozenList.__repr__ = frozenlist_repr # type: ignore diff --git a/examples/specific/cpp_function.h b/examples/specific/cpp_function.h index 2accc5e5..a09c92e4 100644 --- a/examples/specific/cpp_function.h +++ b/examples/specific/cpp_function.h @@ -8,8 +8,6 @@ struct Class { void (*f_issue_489)(struct Foo *foo, int value); int f_issue_338() noexcept; - - int anon_params(int, int, int x, char*); }; /** A namespace to demonstrate a namespaced function */ diff --git a/examples/specific/inheritance.h b/examples/specific/inheritance.h index 391e83a0..4ef539d8 100644 --- a/examples/specific/inheritance.h +++ b/examples/specific/inheritance.h @@ -7,7 +7,3 @@ class Main : public BaseA, BaseB {}; class ChildA : public Main {}; class ChildB : public Main {}; - -class ChildV1 : virtual public BaseA {}; -class ChildV2 : virtual public BaseA {}; -class ChildV3 : public ChildV1, ChildV2 {}; diff --git a/tests/data/auto/auto_class.h b/tests/data/auto/auto_class.h new file mode 100644 index 00000000..662655fd --- /dev/null +++ b/tests/data/auto/auto_class.h @@ -0,0 +1,12 @@ + +//! \brief class outside of namespace +class AutoClassTest { + + //! \brief non-namespaced class function + void member() {}; + + //! \brief non-namespaced class other function + void anotherMember() {}; +}; + + diff --git a/tests/data/auto/auto_function.h b/tests/data/auto/auto_function.h new file mode 100644 index 00000000..be4e3014 --- /dev/null +++ b/tests/data/auto/auto_function.h @@ -0,0 +1,6 @@ + +//! \brief non-namespaced class function +void autoFunction() {}; + +//! \brief non-namespaced class other function +void anotherAutoFunction() {}; diff --git a/tests/data/auto/compare.xml b/tests/data/auto/compare.xml new file mode 100644 index 00000000..5524c8b8 --- /dev/null +++ b/tests/data/auto/compare.xml @@ -0,0 +1,45 @@ + + + + + + class AutoClassTest + + class outside of namespace + + Private Functions + + + inline void member + + non-namespaced class function + + + + + inline void anotherMember + + non-namespaced class other function + + + + + + + Functions + + + void autoFunction + + non-namespaced class function + + + + + void anotherAutoFunction + + non-namespaced class other function + + + + diff --git a/tests/data/auto/input.rst b/tests/data/auto/input.rst new file mode 100644 index 00000000..e61d1de6 --- /dev/null +++ b/tests/data/auto/input.rst @@ -0,0 +1,2 @@ +.. autodoxygenfile:: auto_class.h +.. autodoxygenfile:: auto_function.h diff --git a/tests/data/examples/test_alias/alias.h b/tests/data/examples/test_alias/alias.h new file mode 100644 index 00000000..33ee5bc8 --- /dev/null +++ b/tests/data/examples/test_alias/alias.h @@ -0,0 +1,15 @@ + +/*! @file alias.h */ + +/** + * Foo frob routine. + * \par bob this something else + * @sideeffect Frobs any foos. + * + * \par bob this something else + * + * @sideeffect Frobs any foos. + * + * @param[out] Frobs any foos. 
+ */ +void frob_foos(void* Frobs); diff --git a/tests/data/examples/test_array/array.h b/tests/data/examples/test_array/array.h new file mode 100644 index 00000000..034e645d --- /dev/null +++ b/tests/data/examples/test_array/array.h @@ -0,0 +1,13 @@ + +/** My function */ +int foo(int a[5]); + +/** My other function + * + * @test This declaration is supposed to be + * @code{.c} + * int bar(int n, int a[static n]); + * @endcode + * But, Sphinx fails to recognize `int a[static n])` as a C specific array syntax + */ +int bar(int n, int a[]); diff --git a/tests/data/examples/test_c_enum/c_enum.h b/tests/data/examples/test_c_enum/c_enum.h new file mode 100644 index 00000000..bf8523fe --- /dev/null +++ b/tests/data/examples/test_c_enum/c_enum.h @@ -0,0 +1,63 @@ + +// Example of a enum in C which has different syntax and different support in Sphinx to the C++ enum + +/** + * Backup data. + * + * \ingroup Backup + */ +typedef enum { + /** + * Compatibility with old gboolean used instead of format. + * + * File type is guessed for extension, non unicode format used + * for Gammu backup. + */ + GSM_Backup_Auto = 0, + /** + * Compatibility with old gboolean used instead of format. + * + * File type is guessed for extension, unicode format used + * for Gammu backup. + */ + GSM_Backup_AutoUnicode = 1, + /** + * LMB format, compatible with Logo manager, can store + * phonebooks and logos. + */ + GSM_Backup_LMB, + /** + * vCalendar standard, can store todo and calendar entries. + */ + GSM_Backup_VCalendar, + /** + * vCard standard, can store phone phonebook entries. + */ + GSM_Backup_VCard, + /** + * LDIF (LDAP Data Interchange Format), can store phone + * phonebook entries. + */ + GSM_Backup_LDIF, + /** + * iCalendar standard, can store todo and calendar entries. + */ + GSM_Backup_ICS, + /** + * Gammu own format can store almost anything from phone. + * + * This is ASCII version of the format, Unicode strings are HEX + * encoded. Use GSM_Backup_GammuUCS2 instead if possible. + */ + GSM_Backup_Gammu, + /** + * Gammu own format can store almost anything from phone. + * + * This is UCS2-BE version of the format. + */ + GSM_Backup_GammuUCS2, + /** + * vNote standard, can store phone notes. + */ + GSM_Backup_VNote, +} GSM_BackupFormat; diff --git a/tests/data/examples/test_c_file/c_file.h b/tests/data/examples/test_c_file/c_file.h new file mode 100644 index 00000000..23e40f18 --- /dev/null +++ b/tests/data/examples/test_c_file/c_file.h @@ -0,0 +1,53 @@ +/* Borrowed from git "cache-tree.h" as an example of C code */ + +#ifndef CACHE_TREE_H +#define CACHE_TREE_H + +#include "tree.h" +#include "tree-walk.h" + +struct cache_tree; +struct cache_tree_sub { + struct cache_tree *cache_tree; + int namelen; + int used; + char name[FLEX_ARRAY]; +}; + +struct cache_tree { + int entry_count; /* negative means "invalid" */ + unsigned char sha1[20]; + int subtree_nr; + int subtree_alloc; + struct cache_tree_sub **down; +}; + +/** Shared cache tree instance. 
*/ +extern struct cache_tree global_cache_tree; + +struct cache_tree *cache_tree(void); +extern void cache_tree_free(struct cache_tree **); +void cache_tree_invalidate_path(struct cache_tree *, const char *); +struct cache_tree_sub *cache_tree_sub(struct cache_tree *, const char *); + +void cache_tree_write(struct strbuf *, struct cache_tree *root); +struct cache_tree *cache_tree_read(const char *buffer, unsigned long size); + +int cache_tree_fully_valid(struct cache_tree *); +int cache_tree_update(struct cache_tree *, struct cache_entry **, int, int, int); + +/** bitmasks to write_cache_as_tree flags */ +#define WRITE_TREE_MISSING_OK 1 +#define WRITE_TREE_IGNORE_CACHE_TREE 2 + +/** error return codes */ +#define WRITE_TREE_UNREADABLE_INDEX (-1) +#define WRITE_TREE_UNMERGED_INDEX (-2) +#define WRITE_TREE_PREFIX_ERROR (-3) + +int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix); +void prime_cache_tree(struct cache_tree **, struct tree *); + +extern int cache_tree_matches_traversal(struct cache_tree *, struct name_entry *ent, struct traverse_info *info); + +#endif diff --git a/tests/data/examples/test_class/class.cpp b/tests/data/examples/test_class/class.cpp new file mode 100644 index 00000000..c8e3bdeb --- /dev/null +++ b/tests/data/examples/test_class/class.cpp @@ -0,0 +1,12 @@ +#include "class.h" + +/*! More documentation in the impl file */ +void ClassTest::function(int myIntParameter) +{ +} + +/*! More documentation in the impl file */ +void ClassTest::anotherFunction() +{ +} + diff --git a/tests/data/examples/test_class/class.h b/tests/data/examples/test_class/class.h new file mode 100644 index 00000000..3301c000 --- /dev/null +++ b/tests/data/examples/test_class/class.h @@ -0,0 +1,137 @@ +#include + +namespace TestNamespaceClasses { + +//! \brief first class inside of namespace +class NamespacedClassTest { + +public: + + //! \brief namespaced class function + virtual void function() const = 0; + + static void functionS(); + + explicit NamespacedClassTest() {} + + //! \brief namespaced class other function + void anotherFunction() {} +}; + + +//! \brief second class inside of namespace +class ClassTest { + +public: + + //! \brief second namespaced class function + void function() {} + + //! \brief second namespaced class other function + void anotherFunction() {} + +}; + + +} // TestNamespaceClasses + +//! \brief class outside of namespace +class OuterClass { + +public: + + //! \brief inner class + class InnerClass {}; + +}; + + +//! \brief class outside of namespace +class ClassTest { + +public: + + /*! \brief non-namespaced class function + + More details in the header file. + */ + void function(int myParameter); + + //! \brief non-namespaced class other function + void anotherFunction(); + + //! \brief namespaced class function + virtual void publicFunction() const = 0; + + virtual void undocumentedPublicFunction() const = 0; + + //! A public class + class PublicClass {}; + + class UndocumentedPublicClass {}; + + //! A public struct + struct PublicStruct {}; + + struct UndocumentedPublicStruct {}; + +protected: + + //! A protected function + void protectedFunction() {} + + void undocumentedProtectedFunction() {} + + //! A protected class + class ProtectedClass {}; + + class UndocumentedProtectedClass {}; + + //! A protected struct + struct ProtectedStruct {}; + + struct UndocumentedProtectedStruct {}; + +private: + + //! This is a private function + virtual void privateFunction() const = 0; + + virtual void undocumentedPrivateFunction() const = 0; + + //! 
A private class + class PrivateClass {}; + + class UndocumentedPrivateClass {}; + + //! A private struct + struct PrivateStruct {}; + + struct UndocumentedPrivateStruct {}; +}; + + +template +void f0(); + +template<> +void f0(); + +namespace NS1 { + +template +void f1(); + +template<> +void f1(); + +namespace NS2 { + +template +void f2(); + +template<> +void f2(); + +} // namespace NS2 +} // namespace NS1 diff --git a/tests/data/examples/test_code_blocks/code_blocks.h b/tests/data/examples/test_code_blocks/code_blocks.h new file mode 100644 index 00000000..d2ee92d1 --- /dev/null +++ b/tests/data/examples/test_code_blocks/code_blocks.h @@ -0,0 +1,36 @@ + +/** A function with an unannotated code block with C/C++ code. + * + * @code + * char* buffer = new char[42]; + * int charsAdded = sprintf(buffer, "Tabs are normally %d spaces\n", 8); + * @endcode + */ +void with_standard_code_block(); + +/** A function with an unannotated code block with non-C/C++ code. + * + * @code + * set(user_list A B C) + * foreach(element ${user_list}) + * message(STATUS "Element is ${element}") + * endforeach() + * @endcode + * + * Another code-block that explicitly remains not highlighted. + * @code{.unparsed} + * Show this as is. + * @endcode + */ +void with_unannotated_cmake_code_block(); + +/** A function with an annotated cmake code block. + * + * @code{.cmake} + * set(user_list A B C) + * foreach(element ${user_list}) + * message(STATUS "Element is ${element}") + * endforeach() + * @endcode + */ +void with_annotated_cmake_code_block(); diff --git a/tests/data/examples/test_cpp_concept/cpp_concept.h b/tests/data/examples/test_cpp_concept/cpp_concept.h new file mode 100644 index 00000000..62a12f4b --- /dev/null +++ b/tests/data/examples/test_cpp_concept/cpp_concept.h @@ -0,0 +1,5 @@ +template +concept Hashable = requires(T a) +{ + { std::hash{}(a) } -> std::convertible_to; +}; diff --git a/tests/data/examples/test_cpp_enum/cpp_enum.h b/tests/data/examples/test_cpp_enum/cpp_enum.h new file mode 100644 index 00000000..ddf952b9 --- /dev/null +++ b/tests/data/examples/test_cpp_enum/cpp_enum.h @@ -0,0 +1,15 @@ +enum Unscoped : int { + UnscopedEnumerator = 42 +}; + +enum struct ScopedStruct : int { + Enumerator = 42 +}; + +enum class ScopedClass : int { + Enumerator = 42 +}; + +enum class ScopedClassNoUnderlying { + Enumerator = 42 +}; diff --git a/tests/data/examples/test_cpp_friendclass/cpp_friendclass.h b/tests/data/examples/test_cpp_friendclass/cpp_friendclass.h new file mode 100644 index 00000000..caa91d07 --- /dev/null +++ b/tests/data/examples/test_cpp_friendclass/cpp_friendclass.h @@ -0,0 +1,7 @@ +struct A {}; +struct B {}; + +struct C { + friend class A; + friend struct B; +}; diff --git a/tests/data/examples/test_cpp_function/cpp_function.h b/tests/data/examples/test_cpp_function/cpp_function.h new file mode 100644 index 00000000..2accc5e5 --- /dev/null +++ b/tests/data/examples/test_cpp_function/cpp_function.h @@ -0,0 +1,19 @@ +struct Foo{}; +struct Class { + virtual void f1() const volatile & = 0; + virtual void f2() const volatile && = 0; + static void f3(); + + + void (*f_issue_489)(struct Foo *foo, int value); + + int f_issue_338() noexcept; + + int anon_params(int, int, int x, char*); +}; + +/** A namespace to demonstrate a namespaced function */ +namespace TestNamespaceFunction { +/** A function within a namspace. 
*/ +void namespaceFunc(); +} diff --git a/tests/data/examples/test_cpp_inherited_members/cpp_inherited_members.h b/tests/data/examples/test_cpp_inherited_members/cpp_inherited_members.h new file mode 100644 index 00000000..0235d58a --- /dev/null +++ b/tests/data/examples/test_cpp_inherited_members/cpp_inherited_members.h @@ -0,0 +1,16 @@ +/** + * @file + */ + +/// Base class +class Base +{ +public: + /// Base-class member function + void f_issue_356(); +}; + +/// Class A +class A : public Base {}; +/// Class B +class B : public Base {}; diff --git a/tests/data/examples/test_cpp_trailing_return_type/cpp_trailing_return_type.h b/tests/data/examples/test_cpp_trailing_return_type/cpp_trailing_return_type.h new file mode 100644 index 00000000..8954b2df --- /dev/null +++ b/tests/data/examples/test_cpp_trailing_return_type/cpp_trailing_return_type.h @@ -0,0 +1,7 @@ +/*! \file cpp_trailing_return_type.h */ + +/*! needed for references in global function return type */ +class Thingy {}; + +//! \brief Function that creates a thingy. +auto f_issue_441() -> Thingy*; diff --git a/tests/data/examples/test_define/define.h b/tests/data/examples/test_define/define.h new file mode 100644 index 00000000..c5bc2980 --- /dev/null +++ b/tests/data/examples/test_define/define.h @@ -0,0 +1,31 @@ +/** + * A simple define without a value + */ +#define USE_STUFF + + +/** + * A define with a simple value + */ +#define MAX_LENGTH 100 + + +/** + * A define with some parameters + * + * \param A The parameter A + * \param B The parameter B + * + * \returns The maximum of A and B + */ +#define MAXIMUM(A,B) ((A > B)?(A):(B)) + + +/** + * A define which spans multiple lines + */ +#define SWAP(A,B) { \ + (a) ^= (b); \ + (b) ^= (a); \ + (a) ^= (b); \ + } diff --git a/tests/data/examples/test_dot_graphs/compare.xml b/tests/data/examples/test_dot_graphs/compare.xml index ebea4a27..ff9de2f2 100644 --- a/tests/data/examples/test_dot_graphs/compare.xml +++ b/tests/data/examples/test_dot_graphs/compare.xml @@ -11,8 +11,7 @@
Using @dotfile command - -
Captions go here
+
diff --git a/tests/data/examples/test_dot_graphs/dot_graphs.h b/tests/data/examples/test_dot_graphs/dot_graphs.h new file mode 100644 index 00000000..8c8f2804 --- /dev/null +++ b/tests/data/examples/test_dot_graphs/dot_graphs.h @@ -0,0 +1,22 @@ +/** + * @file dot_graphs.h + * + * @page dotgraphs Dot Graph Demonstrations + * + * @section dotcmd Using \@dot command + * + * @dot "basic graph elements" + * digraph G { + * bgcolor="purple:pink" label="a graph" fontcolor="white" + * subgraph cluster1 { + * fillcolor="blue:cyan" label="a cluster" fontcolor="white" style="filled" gradientangle="270" + * node [shape=box fillcolor="red:yellow" style="filled" gradientangle=90] + * "a node"; + * } + * } + * @enddot + * + * @section dotfilecmd Using \@dotfile command + * + * @dotfile "dotfile.dot" "Captions go here" + */ diff --git a/tests/data/examples/test_dot_graphs/dotfile.dot b/tests/data/examples/test_dot_graphs/dotfile.dot new file mode 100644 index 00000000..47012105 --- /dev/null +++ b/tests/data/examples/test_dot_graphs/dotfile.dot @@ -0,0 +1,47 @@ +digraph G {bgcolor="red:cyan" gradientangle=0 + + subgraph cluster_0 { + style=filled; + color=lightgrey; + fillcolor="blue:yellow"; + gradientangle=90; + node [fillcolor="yellow:green" style=filled gradientangle=270] a0; + node [fillcolor="green:red"] a1; + node [fillcolor="red:cyan"] a2; + node [fillcolor="cyan:blue"] a3; + + a0 -> a1 -> a2 -> a3; + label = "process #1"; + } + + subgraph cluster_1 { + node [fillcolor="yellow:magenta" + style=filled gradientangle=270] b0; + node [fillcolor="magenta:cyan"] b1; + node [fillcolor="cyan:red"] b2; + node [fillcolor="red:blue"] b3; + + b0 -> b1 -> b2 -> b3; + label = "process #2"; + color=blue + fillcolor="blue:yellow"; + style=filled; + gradientangle=90; + } + start -> a0; + start -> b0; + a1 -> b3; + b2 -> a3; + a3 -> a0; + a3 -> end; + b3 -> end; + + start [shape=Mdiamond , + fillcolor="yellow:brown", + gradientangle=90, + style=radial]; + end [shape=Msquare, + fillcolor="orange:blue", + style=radial, + gradientangle=90]; +} diff --git a/tests/data/examples/test_group/group.h b/tests/data/examples/test_group/group.h new file mode 100644 index 00000000..e5d7589b --- /dev/null +++ b/tests/data/examples/test_group/group.h @@ -0,0 +1,90 @@ + +/** @defgroup mygroup My Group + * This is the first group + * @{ + */ + +//! \brief first class inside of namespace +class GroupedClassTest { + +public: + //! \brief namespaced class function + virtual void publicFunction() const = 0; + + virtual void undocumentedPublicFunction() const = 0; + + //! A protected class + class PublicClass {}; + + class UndocumentedPublicClass {}; + +protected: + + //! A protected function + void protectedFunction() {}; + + void undocumentedProtectedFunction() {}; + + //! A protected class + class ProtectedClass {}; + + class UndocumentedProtectedClass {}; + +private: + + //! This is a private function + virtual void privateFunction() const = 0; + + virtual void undocumentedPrivateFunction() const = 0; + + //! A private class + class PrivateClass {}; + + class UndocumentedPrivateClass {}; +}; + +//! This function is in MyGroup +void groupedFunction(); + +/** @} */ // end of mygroup + +/** @defgroup innergroup Inner Group + * @ingroup mygroup + * This is an inner group + * @{ + */ + +//! \brief inner class inside of namespace +class InnerGroupClassTest { + +public: + //! \brief inner namespaced class function + void function() {}; + +private: + + //! 
A private function + void innerGroupPrivateFunction() {}; + + class PrivateClass {}; +}; + +/** @} */ // end of innergroup + +//! \brief second class inside of namespace +class UngroupedClassTest { + +public: + //! \brief second namespaced class function + void function() {}; + +private: + + //! A private function + void ungroupedPrivateFunction() {}; + + class PrivateClass {}; +}; + +//! Ungrouped function +void ungroupedFunction(); diff --git a/examples/specific/group_content_only.hpp b/tests/data/examples/test_group_content_only/group_content_only.hpp similarity index 100% rename from examples/specific/group_content_only.hpp rename to tests/data/examples/test_group_content_only/group_content_only.hpp diff --git a/tests/data/examples/test_headings/headings.h b/tests/data/examples/test_headings/headings.h new file mode 100644 index 00000000..0cb8e581 --- /dev/null +++ b/tests/data/examples/test_headings/headings.h @@ -0,0 +1,18 @@ +/*! \brief This is a documentation + +This is more documentation. + +

<h1>Header</h1> +Text + +<h2>Header <b>Bold Header Text</b></h2>
+Text + +Header +--------- +Text + +### Header ### +Text +*/ +class HeadingsTest {}; diff --git a/tests/data/examples/test_image/compare.xml b/tests/data/examples/test_image/compare.xml new file mode 100644 index 00000000..4b912e30 --- /dev/null +++ b/tests/data/examples/test_image/compare.xml @@ -0,0 +1,14 @@ + + + + + + class ImageClass + + This is a class with an image in the description. + It renders like this: + + Breathe & Sphinx should automatically copy the image from the doxygen output directory into the _images folder of the Sphinx output. + + + diff --git a/tests/data/examples/test_image/image.h b/tests/data/examples/test_image/image.h new file mode 100644 index 00000000..cbd6b50f --- /dev/null +++ b/tests/data/examples/test_image/image.h @@ -0,0 +1,9 @@ +/** +* This is a class with an image in the description. It renders like this: +* +* \image HTML pixel.png +* +* Breathe & Sphinx should automatically copy the image from the doxygen output directory into the +* _images folder of the Sphinx output. +*/ +class ImageClass {}; diff --git a/tests/data/examples/test_image/input.rst b/tests/data/examples/test_image/input.rst new file mode 100644 index 00000000..d962e0e1 --- /dev/null +++ b/tests/data/examples/test_image/input.rst @@ -0,0 +1 @@ +.. doxygenclass:: ImageClass diff --git a/tests/data/examples/test_image/pixel.png b/tests/data/examples/test_image/pixel.png new file mode 100644 index 0000000000000000000000000000000000000000..392cb4733e133f4931ffbc2798121a3ce3b81d32 GIT binary patch literal 70 zcmeAS@N?(olHy`uVBq!ia0vp^j3CSbBp9sfW`_bPUQZXt5Dr;JCWgQN8U8aeGBR9F RJm&*a;OXk;vd$@?2>_HV4kZ8p literal 0 HcmV?d00001 diff --git a/tests/data/examples/test_index/compare.xml b/tests/data/examples/test_index/compare.xml new file mode 100644 index 00000000..fa46155a --- /dev/null +++ b/tests/data/examples/test_index/compare.xml @@ -0,0 +1,341 @@ + + + + + + struct E + + + Public Members + + + char F + + + + + + + + + template<typename T>class J + + + Public Members + + + T K[H] + + + + + + Friends + + + static inline friend bool operator==J jaJ jb + + + + + + + + + template<typename T>struct L + + + Public Static Attributes + + + static constexpr bool M = false + + + + + + + + + template<typename T>struct L<J<T>> + + + Public Static Attributes + + + static constexpr bool M = true + + + + + + + + + template<typename T>concept N + + + + + + namespace G + + + Typedefs + + + typedef void (*O)(B) + + + + + + Variables + + + constexpr unsigned int H = 12 + + + + + + int I + + + + + + + + file index.h + + + Defines + + + P + + + + + + Typedefs + + + using B = long + + + + + + Enums + + + enum Q + + Values: + + + enumerator R + + + + + + enumerator S + + + + + + + + Functions + + + auto Cauto x -> A + + + + + + template<typename T>void D + + + + + + Variables + + + int A + + + + + + + + Defines + + + P + + + + + + Typedefs + + + using B = long + + + + + + Enums + + + enum Q + + Values: + + + enumerator R + + + + + + enumerator S + + + + + + + + Functions + + + auto Cauto x -> A + + + + + + template<typename T>void D + + + + + + Variables + + + int A + + + + + + + struct E + + + Public Members + + + char F + + + + + + + + + namespace G + + + Typedefs + + + typedef void (*O)(B) + + + + + + Variables + + + constexpr unsigned int H = 12 + + + + + + int I + + + + + + + template<typename T>class J + + + Public Members + + + T K[H] + + + + + + Friends + + + static inline friend bool operator==J jaJ jb + + + + + + + + + template<typename T>struct L + + + Public Static Attributes + + + static constexpr 
bool M = false + + + + + + + + + template<typename T>struct L<J<T>> + + + Public Static Attributes + + + static constexpr bool M = true + + + + + + + + + diff --git a/tests/data/examples/test_index/index.h b/tests/data/examples/test_index/index.h new file mode 100644 index 00000000..82ff6709 --- /dev/null +++ b/tests/data/examples/test_index/index.h @@ -0,0 +1,43 @@ + +int A; + +using B = long; + +auto C(auto x) -> A; + +template void D(); + +struct E { + char F; +}; + +namespace G { + +constexpr unsigned int H = 12; + +extern int I; + +template class J { +public: + T K[H]; + static friend bool operator==(J ja,J jb) { + for(unsigned int i=0; i < H; ++i) { + if(ja[i] != jb[i]) return false; + } + return true; + } +}; + +template struct L { static constexpr bool M = false; }; +template struct L> { static constexpr bool M = true; }; + +template concept N = L::Q; + +typedef void (*O)(B); + +} + +#define P U + +enum Q {R=0, S}; + diff --git a/tests/data/examples/test_index/input.rst b/tests/data/examples/test_index/input.rst new file mode 100644 index 00000000..2f32bede --- /dev/null +++ b/tests/data/examples/test_index/input.rst @@ -0,0 +1,2 @@ +.. doxygenindex:: +.. doxygenfile:: index.h diff --git a/tests/data/examples/test_inheritance/inheritance.h b/tests/data/examples/test_inheritance/inheritance.h new file mode 100644 index 00000000..391e83a0 --- /dev/null +++ b/tests/data/examples/test_inheritance/inheritance.h @@ -0,0 +1,13 @@ + +class BaseA {}; +class BaseB {}; + +/*! \brief This is the main class we're interested in */ +class Main : public BaseA, BaseB {}; + +class ChildA : public Main {}; +class ChildB : public Main {}; + +class ChildV1 : virtual public BaseA {}; +class ChildV2 : virtual public BaseA {}; +class ChildV3 : public ChildV1, ChildV2 {}; diff --git a/tests/data/examples/test_inline/inline.h b/tests/data/examples/test_inline/inline.h new file mode 100644 index 00000000..a21313c3 --- /dev/null +++ b/tests/data/examples/test_inline/inline.h @@ -0,0 +1,17 @@ +#include +/** A class to demonstrate inline documentation syntax. */ +class InlineTest +{ + public: + /** A member function. + * + * Details about member function + * + * \exception std::out_of_range parameter is out of range. + * @return a character pointer. + */ + const char *member(char c, ///< c a character. + int n) ///< n an integer. + + throw(std::out_of_range); +}; diff --git a/tests/data/examples/test_latexmath/latexmath.h b/tests/data/examples/test_latexmath/latexmath.h new file mode 100644 index 00000000..16637580 --- /dev/null +++ b/tests/data/examples/test_latexmath/latexmath.h @@ -0,0 +1,18 @@ + +/** + * @brief A class + * + * A inline formula: \f$ f(x) = a + b \f$ + * + * A display style formula: + * @f[ + * \int_a^b f(x) dx = F(b) - F(a) + * @f] + */ +class MathHelper +{ +public: + MathHelper() {} + ~MathHelper() {} +} + diff --git a/tests/data/examples/test_links/links.h b/tests/data/examples/test_links/links.h new file mode 100644 index 00000000..ef09f694 --- /dev/null +++ b/tests/data/examples/test_links/links.h @@ -0,0 +1,9 @@ + +/*! \brief first struct inside of namespace + + This is a longer description with a link to a webpage + in the text http://www.github.com in order to test out Breathe's + handling of links. + + */ +class LinksTest {}; diff --git a/tests/data/examples/test_lists/lists.h b/tests/data/examples/test_lists/lists.h new file mode 100644 index 00000000..64790506 --- /dev/null +++ b/tests/data/examples/test_lists/lists.h @@ -0,0 +1,201 @@ +/** + * \brief This is a list example. 
+ * + * Following is a list using '+' for bullets: + * + One item. + * + Two items. + * + Three items. + * + Four. + * + * And this is some more text. + */ +class SimpleList_1 +{ +}; + +/** + * \brief This is a list example. + * + * Following is a list using '-' for bullets: + * - One item. + * - Two items. + * - Three items. + * - Four. + * + * And this is some more text. + */ +class SimpleList_2 +{ +}; + +/** + * \brief This is a list example. + * + * Following is a list using '*' for bullets: + * * One item. + * * Two items. + * * Three items. + * * Four. + * + * And this is some more text. + */ +class SimpleList_3 +{ +}; + +/** + * \brief This is a list example. + * + * Following is an auto-numbered list: + * -# One item. + * -# Two items. + * -# Three items. + * -# Four. + * + * And this is some more text. + */ +class SimpleList_4 +{ +}; + +/** + * \brief This is a list example. + * + * Following is a numbered list: + * 1. One item. + * 2. Two items. + * 3. Three items. + * 4. Four. + * + * And this is some more text. + */ +class SimpleList_5 +{ +}; + +/** + * \brief This is a list example. + * + * Following is an unordered list using 'HTML' tags: + *
<ul> + *   <li> One item. + *   <li> Two items. + *   <li> Three items. + *   <li> Four. + * </ul>
+ * + * And this is some more text. + */ +class SimpleList_6 +{ +}; + +/** + * A list of events: + * - mouse events + * -# mouse move event + * -# mouse click event\n + * More info about the click event. + * -# mouse double click event + * - keyboard events + * 1. key down event + * 2. key up event + * + * More text here. + */ +class NestedLists_1 +{ +}; + +/** + * Text before the list + * - list item 1 + * - sub item 1 + * - sub sub item 1 + * - sub sub item 2 + * . + * The dot above ends the sub sub item list. + * + * More text for the first sub item + * . + * The dot above ends the first sub item. + * + * More text for the first list item + * - sub item 2 + * - sub item 3 + * - list item 2 + * . + * More text in the same paragraph. + * + * More text in a new paragraph. + */ +class NestedLists_2 +{ +}; + +/*! + * A list of events: + *
    + *
<ul> + *   <li> mouse events + *     <ol> + *       <li> mouse move event + *       <li> mouse click event<br> + *            More info about the click event. + *       <li> mouse double click event + *     </ol> + *   <li> keyboard events + *     <ol> + *       <li> key down event + *       <li> key up event + *     </ol> + *   </ul>
+ * More text here. + */ + class NestedLists_3 +{ +}; + +/** + * A list of events: + * 1. mouse events + * -# mouse move event + * 1. swipe event + * 2. circle event + * 3. wave event + * -# mouse click event\n + * More info about the click event. + * -# mouse double click event + * 2. keyboard events + * -# key down event + * -# key up event + * 3. touch events + * -# pinch event + * -# swipe event + * More text here. + */ +class NestedLists_4 +{ +}; + +/** + * A deeply nested list of events: + * 1. mouse events + * -# mouse move event + * 1. swipe event + * -# swipe left + * -# swipe right + * 2. circle event + * 3. wave event + * -# mouse click event\n + * More info about the click event. + * -# mouse double click event + * 2. keyboard events + * -# key down event + * -# key up event + * 3. touch events + * -# pinch event + * -# swipe event + * More text here. + */ +class NestedLists_5 +{ +}; diff --git a/tests/data/examples/test_membergroups/membergroups.h b/tests/data/examples/test_membergroups/membergroups.h new file mode 100644 index 00000000..c35ab530 --- /dev/null +++ b/tests/data/examples/test_membergroups/membergroups.h @@ -0,0 +1,13 @@ +//! \brief demonstrates member groups +class GroupedMembers { + +public: + + ///@{ @name myGroup + void in_mygroup_one(int myParameter); ///< A function + void in_mygroup_two(int myParameter); ///< Another function + ///@} + + void not_in_mygroup(int myParameter); ///< This one is not in myGroup + +}; diff --git a/examples/specific/param_dirs.h b/tests/data/examples/test_param_dirs/param_dirs.h similarity index 100% rename from examples/specific/param_dirs.h rename to tests/data/examples/test_param_dirs/param_dirs.h diff --git a/tests/data/examples/test_qtsignalsandslots/qtsignalsandslots.h b/tests/data/examples/test_qtsignalsandslots/qtsignalsandslots.h new file mode 100644 index 00000000..493e1708 --- /dev/null +++ b/tests/data/examples/test_qtsignalsandslots/qtsignalsandslots.h @@ -0,0 +1,40 @@ +#ifndef QT_OBJECT_H +#define QT_OBJECT_H + +/*! +*\brief Forward declaration of QT API class + +QT slots and signals typically `#include `, but this example is parsed without QT SDK installed. +*/ +extern class QObject; + +class QtSignalSlotExample: public QObject +{ + Q_OBJECT + + public: + + /*! + *\param iShownParameter + This is shown in declaration + */ + void workingFunction( int iShownParameter ) { Q_UNUSED( iShownParameter ; ) } + + signals: + + /*! + \param iShown + This is in function declaration + */ + void workingSignal( int iShown ); + + public slots: + + /*! + \param iShown + This is in function declaration + */ + void workingSlot( int iShown ) { iShown; } +}; + +#endif diff --git a/tests/data/examples/test_rst/rst.h b/tests/data/examples/test_rst/rst.h new file mode 100644 index 00000000..24d88b31 --- /dev/null +++ b/tests/data/examples/test_rst/rst.h @@ -0,0 +1,77 @@ + +//! \brief first class inside of namespace +class TestClass +{ +public: + + /*! + Inserting additional reStructuredText information. + + \rst + + This is some funky non-XML compliant text: <& !>< + + .. note:: + + This reStructuredText has been handled correctly. + \endrst + + This is just a standard verbatim block with code: + + \verbatim + child = 0; + while( child = parent->IterateChildren( child ) ) + \endverbatim + + */ + virtual void function() const = 0; + + /*! + Inserting additional reStructuredText information. + \verbatim embed:rst + .. note:: + + This reStructuredText has been handled correctly. 
+ \endverbatim + */ + virtual void rawVerbatim() const = 0; + + /*! + * Inserting additional reStructuredText information. + * + * \verbatim embed:rst:leading-asterisk + * Some example code:: + * + * int example(int x) { + * return x * 2; + * } + * \endverbatim + */ + virtual void rawLeadingAsteriskVerbatim() const = 0; + + /// Some kind of method + /// + /// @param something a parameter + /// + /// @verbatim embed:rst:leading-slashes + /// .. code-block:: c + /// :linenos: + /// + /// bool foo(bool something) { + /// return something; + /// }; + /// + /// @endverbatim + /// @note Documentation using `///` should begin and end in a blank line. + + virtual void rawLeadingSlashesVerbatim(int something) const = 0; + + /*! + Inserting an inline reStructuredText snippet. + Linking to another function: \inlinerst :cpp:func:`TestClass::rawVerbatim` \endrst + */ + virtual void rawInlineVerbatim() const = 0; + + //! Brief description + virtual void testFunction() const {}; +}; diff --git a/tests/data/examples/test_simplesect/simplesect.h b/tests/data/examples/test_simplesect/simplesect.h new file mode 100644 index 00000000..bc5634b0 --- /dev/null +++ b/tests/data/examples/test_simplesect/simplesect.h @@ -0,0 +1,19 @@ +/*! + + \pre stuff must be correct + \pre more stuff must be correct + \post stuff will be nice + \post more stuff will be nice + \return nothing + \par par, something + \warning warning, don't do this + \note note, be careful + \see see, f_raw + \sa sa, f_raw + \remark remark, 1 + \remark remark, 2 + \remarks remarks, 1 + \remarks remarks, 2 +*/ +template +void f(int a, float b, std::string c); diff --git a/tests/data/examples/test_tables/tables.h b/tests/data/examples/test_tables/tables.h new file mode 100644 index 00000000..d6340649 --- /dev/null +++ b/tests/data/examples/test_tables/tables.h @@ -0,0 +1,96 @@ +/** + * \brief This is a simple Markdown table example. + * + * Following is a simple table using Markdown syntax. + * + * First Header | Second Header + * ------------- | ------------- + * Content Cell | Content Cell + * Content Cell | Content Cell + * + * And this is some more text. + */ +class Table_1 +{ +}; + +/** + * \brief This is a Markdown table with alignment. + * + * Following is a table with alignment using Markdown syntax. + * + * | Right | Center | Left | + * | ----: | :----: | :---- | + * | 10 | 10 | 10 | + * | 1000 | 1000 | 1000 | + * + * And this is some more text. + */ +class Table_2 +{ +}; + +/** + * \brief This is a Markdown table with rowspan and alignment. + * + * Following is a table with rowspan and alignment using Markdown syntax. + * + * | Right | Center | Left | + * | ----: | :----: | :---- | + * | 10 | 10 | 10 | + * | ^ | 1000 | 1000 | + * + * And this is some more text. + */ +class Table_3 +{ +}; + +/** + * \brief This is a Markdown table with colspan and alignment. + * + * Following is a table with colspan and alignment using Markdown syntax. + * + * | Right | Center | Left | + * | ----: | :----: | :---- | + * | 10 | 10 | 10 | + * | 1000 ||| + * + * And this is some more text. + */ +class Table_4 +{ +}; + +/** + * \brief This is a Doxygen table. + * + * Following is a table using Doxygen syntax (and all supported features). + * + * + * + *
<table> + * <caption>Complex table</caption> + * <tr><th>Column 1 <th>Column 2 <th>Column 3 + * <tr><td rowspan="2">cell row=1+2,col=1<td>cell row=1,col=2<td>cell row=1,col=3 + * <tr><td rowspan="2">cell row=2+3,col=2 <td>cell row=2,col=3 + * <tr><td>cell row=3,col=1 <td rowspan="2">cell row=3+4,col=3 + * <tr><td colspan="2">cell row=4,col=1+2 + * <tr><td>cell row=5,col=1 <td colspan="2">cell row=5,col=2+3 + * <tr><td colspan="2" rowspan="2">cell row=6+7,col=1+2 <td>cell row=6,col=3 + * <tr> <td>cell row=7,col=3 + * <tr><td>cell row=8,col=1 <td>cell row=8,col=2\n + *   <table> + *     <tr><td>Inner cell row=1,col=1<td>Inner cell row=1,col=2 + *     <tr><td>Inner cell row=2,col=1<td>Inner cell row=2,col=2 + *   </table> + *   <td>cell row=8,col=3 + *   <ul> + *     <li>Item 1 + *     <li>Item 2 + *   </ul> + * </table>
+ * + * And this is some more text. + */ +class Table_5 +{ +}; diff --git a/tests/data/examples/test_template_class_non_type/template_class_non_type.h b/tests/data/examples/test_template_class_non_type/template_class_non_type.h new file mode 100644 index 00000000..28ac60b5 --- /dev/null +++ b/tests/data/examples/test_template_class_non_type/template_class_non_type.h @@ -0,0 +1,38 @@ +/** + * @brief a class with three template parameters + * + * @tparam T this is the first template parameter + * @tparam U this is the second template parameter + * @tparam N this is the third template parameter, it is a non-type parameter + */ +template +class anothertemplateclass +{ +public: + /// default constructor + anothertemplateclass() {} + + /** + * @brief constructor with two template argument + * + * @param m1 first argument + * @param m2 second argument + */ + anothertemplateclass(T const & m1, U const & m2) : + member1(m1), member2(m2) {} + + /** + * @brief member accepting template argument and returning template argument + * + * @param t argument + * @return returns value of type U + */ + U method(T const & t); + + private: + /// a member with templated type + T member1; + + /// another member with templated type + U member2; +}; diff --git a/tests/data/examples/test_template_function/compare.xml b/tests/data/examples/test_template_function/compare.xml index af01fabc..ea76b225 100644 --- a/tests/data/examples/test_template_function/compare.xml +++ b/tests/data/examples/test_template_function/compare.xml @@ -95,5 +95,12 @@
+ + + template<typename T = void, typename, int>void function3 + + a function with unnamed arguments and an argument with a default value + +
diff --git a/tests/data/examples/test_template_function/template_function.h b/tests/data/examples/test_template_function/template_function.h new file mode 100644 index 00000000..d913e6d5 --- /dev/null +++ b/tests/data/examples/test_template_function/template_function.h @@ -0,0 +1,52 @@ +#include + +/** + * @brief a function with one template arguments + * + * @tparam T this is the template parameter + * + * @param arg1 argument of type T + * + * @return return value of type T + */ +template +T function1(T arg1) +{} + + +/** + * @brief a function with one template argument specialized for `std::string` + * + * @param arg1 argument of type `std::string` + * + * @return return value of type `std::string` + */ +template <> +std::string function1(std::string arg1) +{} + + +/** + * @brief a function with three template arguments + * + * @tparam T this is the first template parameter + * @tparam U this is the second template parameter + * @tparam N this is the third template parameter, it is a non-type parameter + * + * @param arg1 first argument of type T + * @param arg2 second argument of type U + * + * @return return value of type T + */ +template +T function2(T arg1, U arg2) +{} + +/** + * @brief a function with unnamed arguments and an argument with a default + * value + */ +template +void function3() +{} + diff --git a/tests/data/examples/test_template_type_alias/template_type_alias.h b/tests/data/examples/test_template_type_alias/template_type_alias.h new file mode 100644 index 00000000..f8545275 --- /dev/null +++ b/tests/data/examples/test_template_type_alias/template_type_alias.h @@ -0,0 +1,20 @@ +/** + * @brief a type alias with one template argument + * + * @tparam T this is the template parameter + * + */ +template +using IsFuzzy = std::is_fuzzy; + + +/** + * @brief a type alias with three template arguments + * + * @tparam T this is the first template parameter + * @tparam U this is the second template parameter + * @tparam N this is the third template parameter, it is a non-type parameter + * + */ +template +using IsFurry = std::is_furry; diff --git a/tests/data/examples/test_union/compare.xml b/tests/data/examples/test_union/compare.xml index fad965fc..34973b58 100644 --- a/tests/data/examples/test_union/compare.xml +++ b/tests/data/examples/test_union/compare.xml @@ -25,59 +25,6 @@
- - - class ClassWithUnion - - A class with a union. - - - class ExtraClass - - Documented class. - - Private Members - - - int a_member - - - - - - float another_member - - - - - - - - - union UnionInClass - - A union with two values. - - Public Members - - - int intvalue - - An int value. - - - - - float floatvalue - - A float value. - - - - - - - namespace foo diff --git a/tests/data/examples/test_union/input.rst b/tests/data/examples/test_union/input.rst index 87bd2c73..5b48956e 100644 --- a/tests/data/examples/test_union/input.rst +++ b/tests/data/examples/test_union/input.rst @@ -1 +1,2 @@ -.. doxygenfile:: union.h +.. doxygenunion:: SeparateUnion +.. doxygennamespace:: foo diff --git a/tests/data/examples/test_union/union.h b/tests/data/examples/test_union/union.h new file mode 100644 index 00000000..a8e4b763 --- /dev/null +++ b/tests/data/examples/test_union/union.h @@ -0,0 +1,38 @@ + + +/// A union of two values +union SeparateUnion +{ + int size; ///< The size of the thing + float depth; ///< How deep it is +}; + + +namespace foo { + +/// A union of two values +union MyUnion +{ + int someInt; ///< The int of it all + float someFloat; ///< The float side of things +}; + +} + +/// A class with a union +class ClassWithUnion +{ + /// A union with two values + union UnionInClass + { + int intvalue; ///< An int value + float floatvalue; ///< A float value + }; + + /// Documented class + class ExtraClass + { + int a_member; + float another_member; + }; +}; diff --git a/tests/data/examples/test_userdefined/userdefined.h b/tests/data/examples/test_userdefined/userdefined.h new file mode 100644 index 00000000..7bb8827b --- /dev/null +++ b/tests/data/examples/test_userdefined/userdefined.h @@ -0,0 +1,31 @@ +// Example from Doxygen documentation + +/** A class. More details about the UserDefinedGroupTest class */ +class UserDefinedGroupTest +{ + public: + //@{ + /** Same documentation for both members. Details */ + void func1InGroup1(); + void func2InGroup1(); + //@} + + /** Function without group. Details. */ + void ungroupedFunction(); + void func1InCustomGroup(); + protected: + void func2InCustomGroup(); +}; + +void UserDefinedGroupTest::func1InGroup1() {} +void UserDefinedGroupTest::func2InGroup1() {} + +/** @name Custom Group + * Description of custom group + */ +//@{ +/** Function 2 in custom group. Details. */ +void UserDefinedGroupTest::func2InCustomGroup() {} +/** Function 1 in custom group. Details. */ +void UserDefinedGroupTest::func1InCustomGroup() {} +//@} diff --git a/tests/data/examples/test_xrefsect/compare.xml b/tests/data/examples/test_xrefsect/compare.xml new file mode 100644 index 00000000..f8cb432d --- /dev/null +++ b/tests/data/examples/test_xrefsect/compare.xml @@ -0,0 +1,41 @@ + + + + A few examples of xrefsect items support. + + Functions + + + int unimplementedvoid + + An example of using Doxygen’s todo command. + Todo:Implement this function. + + + + + void buggy_functionint param + + An example of using Doxygen’s bug and test commands. + Bug:Does not work yet. + Test:Add proper unit testing first. + + + + + void old_functionvoid + + An example of using Doxygen’s deprecated command. + Deprecated:Should not be used on new code. + + + + + void sample_xrefitem_functionvoid + + An example of a custom Doxygen xrefitem declared as an ALIAS. + xref Sample:This text shows up in the xref output. 
+ + + + diff --git a/tests/data/examples/test_xrefsect/extra_dox_opts.txt b/tests/data/examples/test_xrefsect/extra_dox_opts.txt new file mode 100644 index 00000000..81dca8e2 --- /dev/null +++ b/tests/data/examples/test_xrefsect/extra_dox_opts.txt @@ -0,0 +1 @@ +ALIASES = "xrefsample=\xrefitem xrefsample \"xref Sample\" \"xref Sample\" " diff --git a/tests/data/examples/test_xrefsect/input.rst b/tests/data/examples/test_xrefsect/input.rst new file mode 100644 index 00000000..35e0d8e0 --- /dev/null +++ b/tests/data/examples/test_xrefsect/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: xrefsect.h diff --git a/tests/data/examples/test_xrefsect/xrefsect.h b/tests/data/examples/test_xrefsect/xrefsect.h new file mode 100644 index 00000000..eb46d10d --- /dev/null +++ b/tests/data/examples/test_xrefsect/xrefsect.h @@ -0,0 +1,34 @@ +/** + * @file xrefsect.h + * A few examples of xrefsect items support. + */ + +/** + * An example of using Doxygen's todo command. + * + * @todo Implement this function. + */ +int unimplemented(void); + +/** + * An example of using Doxygen's bug and test commands. + * + * @bug Does not work yet. + * + * @test Add proper unit testing first. + */ +void buggy_function(int param); + +/** + * An example of using Doxygen's deprecated command. + * + * @deprecated Should not be used on new code. + */ +void old_function(void); + +/** + * An example of a custom Doxygen xrefitem declared as an ALIAS. + * + * @xrefsample This text shows up in the xref output. + */ +void sample_xrefitem_function(void); diff --git a/tests/test_examples.py b/tests/test_examples.py index 10fd00a0..d3b37828 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -7,17 +7,13 @@ import dataclasses -EXAMPLES_SOURCE_DIR = pathlib.Path(__file__).parent.parent / 'examples' / 'specific' - DOXYFILE_TEMPLATE = """ PROJECT_NAME = "example" HAVE_DOT = YES -DOTFILE_DIRS = "{source_dir}" GENERATE_LATEX = NO GENERATE_MAN = NO GENERATE_RTF = NO CASE_SENSE_NAMES = NO -INPUT = {input} OUTPUT_DIRECTORY = "{output}" QUIET = YES JAVADOC_AUTOBRIEF = YES @@ -34,6 +30,14 @@ BUFFER_SIZE = 0x1000 +TEST_DATA_DIR = pathlib.Path(__file__).parent / 'data' + +DEFAULT_CONF = { + 'project': 'test', + 'breathe_default_project': 'example', + 'breathe_show_include': False, + 'extensions': ['breathe','sphinx.ext.graphviz']} + class XMLEventType(enum.Enum): E_START = enum.auto() @@ -121,15 +125,7 @@ def handle_text(data): def get_individual_tests(): - return (pathlib.Path(__file__).parent / "data" / "examples").glob("test_*") - -def filter_c_files(name): - for p in EXAMPLES_SOURCE_DIR.glob(name + '.*'): - if p.suffix in C_FILE_SUFFIXES: - full = str(p) - if '"' in full: - raise ValueError('quotations marks not allowed in path names') - yield f'"{full}"' + return (TEST_DATA_DIR / "examples").glob("test_*") def filtered_xml(infile): ignore = 0 @@ -151,44 +147,19 @@ def filtered_xml(infile): node.value = text yield event, node -@pytest.mark.parametrize('test_input', get_individual_tests()) -def test_example(make_app, tmp_path, test_input): - doxygen = shutil.which('doxygen') - if doxygen is None: - raise ValueError('cannot find doxygen executable') - - doxyfile = tmp_path / "Doxyfile" - doxycontent = DOXYFILE_TEMPLATE.format( - input=" ".join(filter_c_files(test_input.stem.removeprefix('test_'))), - source_dir=EXAMPLES_SOURCE_DIR, - output=tmp_path - ) - extra_opts = test_input / 'extra_dox_opts.txt' - if extra_opts.exists(): - doxycontent += extra_opts.read_text() - doxyfile.write_text(doxycontent) - (tmp_path / "conf.py").touch() - 
shutil.copyfile(test_input / "input.rst", tmp_path / "index.rst") - - subprocess.run([doxygen, doxyfile], check = True) - - make_app( - buildername='xml', - srcdir=tmp_path, - confoverrides={ - 'project': 'test', - 'breathe_projects': {'example': str(tmp_path / "xml")}, - 'breathe_default_project': 'example', - 'breathe_show_include': False, - 'extensions': ['breathe','sphinx.ext.graphviz']}).build() +def conf_overrides(extra): + conf = DEFAULT_CONF.copy() + conf.update(extra) + return conf +def compare_xml(generated, model): event_str = { XMLEventType.E_START: 'element start', XMLEventType.E_END: 'element end', XMLEventType.E_TEXT: 'text' } - with open(tmp_path / '_build' / 'xml' / 'index.xml') as o_file, open(test_input / 'compare.xml') as c_file: + with open(generated) as o_file, open(model) as c_file: for o, c in zip(filtered_xml(o_file),filtered_xml(c_file)): o_type, o_node = o c_type, c_node = c @@ -204,3 +175,44 @@ def test_example(make_app, tmp_path, test_input): assert o_value == value, f'wrong value for attribute "{key}" at line {o_node.line_no}: expected "{value}", found "{o_value}"' elif o_type == XMLEventType.E_TEXT: assert o_node.value == c_node.value, f'wrong content at line {o_node.line_no}: expected "{c_node.value}", found "{o_node.value}"' + +@pytest.mark.parametrize('test_input', get_individual_tests()) +def test_example(make_app, tmp_path, test_input, monkeypatch): + monkeypatch.chdir(test_input) + + doxygen = shutil.which('doxygen') + if doxygen is None: + raise ValueError('cannot find doxygen executable') + + doxyfile = tmp_path / "Doxyfile" + doxycontent = DOXYFILE_TEMPLATE.format(output=tmp_path) + extra_opts = test_input / 'extra_dox_opts.txt' + if extra_opts.exists(): + doxycontent += extra_opts.read_text() + doxyfile.write_text(doxycontent) + (tmp_path / "conf.py").touch() + shutil.copyfile(test_input / "input.rst", tmp_path / "index.rst") + + subprocess.run([doxygen, doxyfile], check = True) + + make_app( + buildername='xml', + srcdir=tmp_path, + confoverrides=conf_overrides({ + 'breathe_projects': {'example': str(tmp_path / "xml")}})).build() + + compare_xml(tmp_path / '_build' / 'xml' / 'index.xml', test_input / 'compare.xml') + +def test_auto(make_app, tmp_path, monkeypatch): + test_input = TEST_DATA_DIR / 'auto' + monkeypatch.chdir(test_input) + (tmp_path / "conf.py").touch() + shutil.copyfile(test_input / "input.rst", tmp_path / "index.rst") + + make_app( + buildername='xml', + srcdir=tmp_path, + confoverrides=conf_overrides({ + 'breathe_projects_source': {'example': (test_input, ['auto_class.h', 'auto_function.h'])}})).build() + + compare_xml(tmp_path / '_build' / 'xml' / 'index.xml', test_input / 'compare.xml') diff --git a/tests/test_parser.py b/tests/test_parser.py new file mode 100644 index 00000000..78a3272d --- /dev/null +++ b/tests/test_parser.py @@ -0,0 +1,67 @@ +import pytest +from breathe import parser + +def test_bad_content(): + xml = """ + + + Sample + sample.hpp + + + int + int public_field + + public_field + Sample::public_field + + + + + + """ + + with pytest.raises(parser.ParseError) as exc: + parser.parse_str(xml) + assert exc.value.lineno == 6 + +def test_malformed(): + xml = """ + + + Sample + sample.hpp + + + int""" + + with pytest.raises(parser.ParseError): + parser.parse_str(xml) + +def test_unknown_tag(): + xml = """ + + + Sample + + Sample + + sample.hpp + + + int + int public_field + + public_field + Sample::public_field + + + + + + """ + + with pytest.warns(parser.ParseWarning) as record: + parser.parse_str(xml) + assert 
len(record) == 1 + assert 'Warning on line 5:' in str(record[0].message) From 4323b9c268841ab37fce79e32ff01d5132774094 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Tue, 12 Dec 2023 20:36:24 -0500 Subject: [PATCH 30/65] Formatted with Black and added test --- breathe/apidoc.py | 21 +- breathe/directives/__init__.py | 4 +- breathe/directives/class_like.py | 33 +- breathe/directives/content_block.py | 41 +- breathe/directives/file.py | 2 +- breathe/directives/function.py | 35 +- breathe/directives/index.py | 8 +- breathe/directives/item.py | 11 +- breathe/finder/__init__.py | 13 +- breathe/finder/compound.py | 2 +- breathe/finder/factory.py | 14 +- breathe/finder/index.py | 3 +- breathe/parser.py | 98 ++- breathe/project.py | 7 +- breathe/renderer/__init__.py | 24 +- breathe/renderer/filter.py | 5 +- breathe/renderer/mask.py | 26 +- breathe/renderer/sphinxrenderer.py | 15 +- documentation/source/conf.py | 5 - pyproject.toml | 2 +- setup.py | 87 ++- tests/conftest.py | 5 +- tests/data/examples/test_python/compare.xml | 76 ++ tests/data/examples/test_python/input.rst | 1 + tests/data/examples/test_python/pyexample.py | 31 + tests/test_examples.py | 135 ++-- tests/test_filters.py | 84 +- tests/test_parser.py | 5 +- tests/test_renderer.py | 64 +- tests/test_utils.py | 3 +- xml_parser_generator/make_parser.py | 724 +++++++++++------- ...module_template.c => module_template.c.in} | 0 ...ubs_template.pyi => stubs_template.pyi.in} | 0 33 files changed, 984 insertions(+), 600 deletions(-) create mode 100644 tests/data/examples/test_python/compare.xml create mode 100644 tests/data/examples/test_python/input.rst create mode 100644 tests/data/examples/test_python/pyexample.py rename xml_parser_generator/{module_template.c => module_template.c.in} (100%) rename xml_parser_generator/{stubs_template.pyi => stubs_template.pyi.in} (100%) diff --git a/breathe/apidoc.py b/breathe/apidoc.py index e7ff33eb..c0a5db2b 100644 --- a/breathe/apidoc.py +++ b/breathe/apidoc.py @@ -14,8 +14,6 @@ :copyright: Originally by Sphinx Team, C++ modifications by Tatsuyuki Ishi :license: BSD, see LICENSE for details. """ -from __future__ import print_function - import os import sys import argparse @@ -24,13 +22,6 @@ from breathe import __version__ -# Account for FileNotFoundError in Python 2 -# IOError is broader but will hopefully suffice -try: - FileNotFoundError -except NameError: - FileNotFoundError = IOError - # Reference: Doxygen XSD schema file, CompoundKind only # Only what breathe supports are included @@ -76,9 +67,10 @@ def write_file(name, text, args): if orig == text: print_info("File %s up to date, skipping." 
% fname, args) return - except FileNotFoundError: + except OSError as exc: # Don't mind if it isn't there - pass + if exc.errno != errno.ENOENT: + raise with open(fname, "w") as target: target.write(text) @@ -86,7 +78,11 @@ def write_file(name, text, args): def format_heading(level, text): """Create a heading of [1, 2 or 3 supported].""" - underlining = ["=", "-", "~",][ + underlining = [ + "=", + "-", + "~", + ][ level - 1 ] * len(text) return "%s\n%s\n\n" % (text, underlining) @@ -146,6 +142,7 @@ def __init__(self, option_strings, dest, **kwargs): self.metavar = ",".join(TYPEDICT.keys()) def __call__(self, parser, namespace, values, option_string=None): + assert isinstance(values, str) value_list = values.split(",") for value in value_list: if value not in TYPEDICT: diff --git a/breathe/directives/__init__.py b/breathe/directives/__init__.py index 4609c906..2306e7fc 100644 --- a/breathe/directives/__init__.py +++ b/breathe/directives/__init__.py @@ -6,7 +6,7 @@ from breathe.renderer.filter import FilterFactory from breathe.renderer.sphinxrenderer import SphinxRenderer -from sphinx.directives import SphinxDirective # pyright: ignore +from sphinx.directives import SphinxDirective # pyright: ignore from docutils import nodes @@ -31,7 +31,7 @@ def warn( raw_text: str, *, rendered_nodes: Sequence[nodes.Node] | None = None, - unformatted_suffix: str = "" + unformatted_suffix: str = "", ) -> list[nodes.Node]: raw_text = self.format(raw_text) + unformatted_suffix if rendered_nodes is None: diff --git a/breathe/directives/class_like.py b/breathe/directives/class_like.py index 0ba259f5..39b67985 100644 --- a/breathe/directives/class_like.py +++ b/breathe/directives/class_like.py @@ -12,6 +12,7 @@ if TYPE_CHECKING: import sys + if sys.version_info >= (3, 11): from typing import NotRequired, TypedDict else: @@ -19,19 +20,23 @@ from breathe import renderer from docutils.nodes import Node - DoxClassOptions = TypedDict('DoxClassOptions',{ - 'path': str, - 'project': str, - 'members': NotRequired[str], - 'membergroups': str, - 'members-only': NotRequired[None], - 'protected-members': NotRequired[None], - 'private-members': NotRequired[None], - 'undoc-members': NotRequired[None], - 'show': str, - 'outline': NotRequired[None], - 'no-link': NotRequired[None], - 'allow-dot-graphs': NotRequired[None]}) + DoxClassOptions = TypedDict( + "DoxClassOptions", + { + "path": str, + "project": str, + "members": NotRequired[str], + "membergroups": str, + "members-only": NotRequired[None], + "protected-members": NotRequired[None], + "private-members": NotRequired[None], + "undoc-members": NotRequired[None], + "show": str, + "outline": NotRequired[None], + "no-link": NotRequired[None], + "allow-dot-graphs": NotRequired[None], + }, + ) else: DoxClassOptions = None @@ -60,7 +65,7 @@ class _DoxygenClassLikeDirective(BaseDirective): def run(self) -> list[Node]: name = self.arguments[0] - options = cast(DoxClassOptions,self.options) + options = cast(DoxClassOptions, self.options) try: project_info = self.project_info_factory.create_project_info(options) diff --git a/breathe/directives/content_block.py b/breathe/directives/content_block.py index 42a5f204..a0a3265f 100644 --- a/breathe/directives/content_block.py +++ b/breathe/directives/content_block.py @@ -12,10 +12,11 @@ from docutils.nodes import Node from docutils.parsers.rst.directives import unchanged_required, flag -from typing import Any, cast, ClassVar, Literal, TYPE_CHECKING +from typing import cast, ClassVar, Literal, TYPE_CHECKING if TYPE_CHECKING: import sys + if 
sys.version_info >= (3, 11): from typing import NotRequired, TypedDict else: @@ -23,19 +24,23 @@ from breathe.renderer import TaggedNode from breathe.finder.factory import FinderRoot - DoxContentBlockOptions = TypedDict('DoxContentBlockOptions',{ - 'path': str, - 'project': str, - 'content-only': NotRequired[None], - 'members': NotRequired[str], - 'protected-members': NotRequired[None], - 'private-members': NotRequired[None], - 'undoc-members': NotRequired[None], - 'show': str, - 'outline': NotRequired[None], - 'no-link': NotRequired[None], - 'desc-only': NotRequired[None], - 'sort': NotRequired[None]}) + DoxContentBlockOptions = TypedDict( + "DoxContentBlockOptions", + { + "path": str, + "project": str, + "content-only": NotRequired[None], + "members": NotRequired[str], + "protected-members": NotRequired[None], + "private-members": NotRequired[None], + "undoc-members": NotRequired[None], + "show": str, + "outline": NotRequired[None], + "no-link": NotRequired[None], + "desc-only": NotRequired[None], + "sort": NotRequired[None], + }, + ) else: DoxContentBlockOptions = None FinderRoot = None @@ -44,7 +49,7 @@ class _DoxygenContentBlockDirective(BaseDirective): """Base class for namespace and group directives which have very similar behaviours""" - kind: ClassVar[Literal['group', 'page', 'namespace']] + kind: ClassVar[Literal["group", "page", "namespace"]] required_arguments = 1 optional_arguments = 1 @@ -65,7 +70,7 @@ class _DoxygenContentBlockDirective(BaseDirective): def run(self) -> list[Node]: name = self.arguments[0] - options = cast(DoxContentBlockOptions,self.options) + options = cast(DoxContentBlockOptions, self.options) try: project_info = self.project_info_factory.create_project_info(options) @@ -98,7 +103,7 @@ def run(self) -> list[Node]: # Having found the compound node for the namespace or group in the index we want to grab # the contents of it which match the filter contents_finder = self.finder_factory.create_finder_from_root( - cast(FinderRoot,node_stack[0].value), project_info + cast(FinderRoot, node_stack[0].value), project_info ) contents: list[list[TaggedNode]] = [] @@ -126,7 +131,7 @@ def run(self) -> list[Node]: mask_factory = NullMaskFactory() context = RenderContext(node_stack, mask_factory, self.directive_args) value = context.node_stack[0].value - assert isinstance(value,parser.Node) + assert isinstance(value, parser.Node) node_list.extend(object_renderer.render(value, context)) return node_list diff --git a/breathe/directives/file.py b/breathe/directives/file.py index 3a858661..1644ab33 100644 --- a/breathe/directives/file.py +++ b/breathe/directives/file.py @@ -53,7 +53,7 @@ def handle_contents(self, file_: str, project_info): mask_factory = NullMaskFactory() context = renderer.RenderContext(node_stack, mask_factory, self.directive_args) value = node_stack[0].value - assert isinstance(value,parser.Node) + assert isinstance(value, parser.Node) node_list.extend(object_renderer.render(value, context)) return node_list diff --git a/breathe/directives/function.py b/breathe/directives/function.py index 8b0b29c4..08a4dbba 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -22,6 +22,7 @@ if TYPE_CHECKING: import sys + if sys.version_info >= (3, 11): from typing import NotRequired, TypedDict else: @@ -29,11 +30,10 @@ from breathe import project from docutils.nodes import Node - DoxFunctionOptions = TypedDict('DoxFunctionOptions',{ - 'path': str, - 'project': str, - 'outline': NotRequired[None], - 'no-link': NotRequired[None]}) + 
DoxFunctionOptions = TypedDict( + "DoxFunctionOptions", + {"path": str, "project": str, "outline": NotRequired[None], "no-link": NotRequired[None]}, + ) else: DoxFunctionOptions = None @@ -81,7 +81,7 @@ def run(self) -> List[Node]: function_name = match.group(2).strip() argsStr = match.group(3) - options = cast(DoxFunctionOptions,self.options) + options = cast(DoxFunctionOptions, self.options) try: project_info = self.project_info_factory.create_project_info(options) @@ -98,7 +98,7 @@ def run(self) -> List[Node]: # Extract arguments from the function name. try: args = self._parse_args(argsStr) - except cpp.DefinitionError as e: # pyright: ignore + except cpp.DefinitionError as e: # pyright: ignore return self.create_warning( project_info, namespace="%s::" % namespace if namespace else "", @@ -157,7 +157,7 @@ def run(self) -> List[Node]: warning_nodes = [nodes.paragraph("", "", nodes.Text(formatted_message)), block] result = warning.warn(message, rendered_nodes=warning_nodes, unformatted_suffix=text) return result - except cpp.DefinitionError as error: # pyright: ignore + except cpp.DefinitionError as error: # pyright: ignore warning.context["cpperror"] = str(error) return warning.warn( "doxygenfunction: Unable to resolve function " @@ -221,7 +221,13 @@ def stripDeclarator(declarator): return paramQual def _create_function_signature( - self, node_stack: list[TaggedNode], project_info, filter_, target_handler, mask_factory, directive_args + self, + node_stack: list[TaggedNode], + project_info, + filter_, + target_handler, + mask_factory, + directive_args, ) -> str: "Standard render process used by subclasses" @@ -248,7 +254,7 @@ def _create_function_signature( context = RenderContext(node_stack, mask_factory, directive_args) node = node_stack[0].value with WithContext(object_renderer, context): - assert isinstance(node,parser.Node_memberdefType) + assert isinstance(node, parser.Node_memberdefType) # this part should be kept in sync with visit_function in sphinxrenderer name = node.name # assume we are only doing this for C++ declarations @@ -257,7 +263,7 @@ def _create_function_signature( object_renderer.create_template_prefix(node), "".join(n.astext() for n in object_renderer.render(node.type)), name, - node.argsstring or '', + node.argsstring or "", ] ) cpp_parser = cpp.DefinitionParser( @@ -266,7 +272,12 @@ def _create_function_signature( ast = cpp_parser.parse_declaration("function", "function") return str(ast) - def _resolve_function(self, matches: list[list[TaggedNode]], args: cpp.ASTParametersQualifiers | None, project_info: project.ProjectInfo): + def _resolve_function( + self, + matches: list[list[TaggedNode]], + args: cpp.ASTParametersQualifiers | None, + project_info: project.ProjectInfo, + ): if not matches: raise _NoMatchingFunctionError() diff --git a/breathe/directives/index.py b/breathe/directives/index.py index 6c4952c8..b0142df5 100644 --- a/breathe/directives/index.py +++ b/breathe/directives/index.py @@ -50,10 +50,14 @@ def handle_contents(self, project_info) -> list[Node]: ) mask_factory = NullMaskFactory() - context = RenderContext([TaggedNode(None,data_object), TaggedNode(None,RootDataObject())], mask_factory, self.directive_args) + context = RenderContext( + [TaggedNode(None, data_object), TaggedNode(None, RootDataObject())], + mask_factory, + self.directive_args, + ) value = context.node_stack[0].value - assert isinstance(value,parser.Node) + assert isinstance(value, parser.Node) try: node_list = object_renderer.render(value, context) except parser.ParserError as e: 
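Note on the option typing used in the directive hunks above: the option dicts are declared with the functional TypedDict syntax because several keys are not valid identifiers ("no-link", "members-only") and flag-style options are typed NotRequired[None], since only their presence matters. A minimal standalone sketch of that pattern follows; ExampleOptions, render_flags and opts are illustrative names, not part of Breathe.

import sys

if sys.version_info >= (3, 11):
    from typing import NotRequired, TypedDict
else:
    # assumes typing_extensions is installed on older interpreters
    from typing_extensions import NotRequired, TypedDict

ExampleOptions = TypedDict(
    "ExampleOptions",
    {"path": str, "project": str, "outline": NotRequired[None], "no-link": NotRequired[None]},
)

def render_flags(options: ExampleOptions) -> bool:
    # Flag options carry no value; code checks for key presence only.
    return "outline" in options

opts: ExampleOptions = {"path": "xml/", "project": "example", "outline": None}
print(render_flags(opts))  # True
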
diff --git a/breathe/directives/item.py b/breathe/directives/item.py index 56ec1ba0..dcd6e5c0 100644 --- a/breathe/directives/item.py +++ b/breathe/directives/item.py @@ -23,11 +23,10 @@ from breathe.renderer import TaggedNode from breathe.renderer.filter import DoxFilter - DoxBaseItemOptions = TypedDict('DoxBaseItemOptions',{ - 'path': str, - 'project': str, - 'outline': NotRequired[None], - 'no-link': NotRequired[None]}) + DoxBaseItemOptions = TypedDict( + "DoxBaseItemOptions", + {"path": str, "project": str, "outline": NotRequired[None], "no-link": NotRequired[None]}, + ) else: DoxBaseItemOptions = None @@ -52,7 +51,7 @@ def create_finder_filter(self, namespace: str, name: str) -> DoxFilter: def run(self) -> list[Node]: options = cast(DoxBaseItemOptions, self.options) - + namespace, _, name = self.arguments[0].rpartition("::") try: diff --git a/breathe/finder/__init__.py b/breathe/finder/__init__.py index 87752173..139cb736 100644 --- a/breathe/finder/__init__.py +++ b/breathe/finder/__init__.py @@ -8,14 +8,21 @@ from breathe.renderer.filter import DoxFilter from breathe.renderer import TaggedNode, T_data_object else: - T_data_object = TypeVar('T_data_object', covariant=True) + T_data_object = TypeVar("T_data_object", covariant=True) class ItemFinder(Generic[T_data_object]): - def __init__(self, project_info: ProjectInfo, node: TaggedNode[T_data_object], item_finder_factory: DoxygenItemFinderFactory): + def __init__( + self, + project_info: ProjectInfo, + node: TaggedNode[T_data_object], + item_finder_factory: DoxygenItemFinderFactory, + ): self.node = node self.item_finder_factory: DoxygenItemFinderFactory = item_finder_factory self.project_info = project_info - def filter_(self, ancestors: list[TaggedNode], filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: + def filter_( + self, ancestors: list[TaggedNode], filter_: DoxFilter, matches: list[list[TaggedNode]] + ) -> None: raise NotImplementedError diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index 8fbb55a7..f347e182 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -60,7 +60,7 @@ def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]] if data_object.kind == parser.DoxMemberKind.enum: for value in data_object.enumvalue: - value_stack = [TaggedNode('enumvalue',value)] + node_stack + value_stack = [TaggedNode("enumvalue", value)] + node_stack if filter_(NodeStack(value_stack)): matches.append(value_stack) diff --git a/breathe/finder/factory.py b/breathe/finder/factory.py index f92dae8d..430bce91 100644 --- a/breathe/finder/factory.py +++ b/breathe/finder/factory.py @@ -13,7 +13,8 @@ if TYPE_CHECKING: from breathe.renderer.filter import DoxFilter - ItemFinderCreator = Callable[[ProjectInfo,TaggedNode,'DoxygenItemFinderFactory'],ItemFinder] + + ItemFinderCreator = Callable[[ProjectInfo, TaggedNode, "DoxygenItemFinderFactory"], ItemFinder] FinderRoot = Union[ parser.Node_DoxygenTypeIndex, @@ -23,7 +24,8 @@ parser.Node_compounddefType, parser.Node_sectiondefType, parser.Node_memberdefType, - parser.Node_refType] + parser.Node_refType, + ] class _CreateCompoundTypeSubFinder: @@ -37,12 +39,16 @@ def __call__(self, project_info: ProjectInfo, *args) -> indexfinder.CompoundType class DoxygenItemFinderFactory: - def __init__(self, finders: dict[type[parser.NodeOrValue], ItemFinderCreator], project_info: ProjectInfo): + def __init__( + self, finders: dict[type[parser.NodeOrValue], ItemFinderCreator], project_info: ProjectInfo + ): self.finders = finders 
self.project_info = project_info def create_finder(self, data_object: parser.NodeOrValue, tag: str | None = None) -> ItemFinder: - return self.finders[type(data_object)](self.project_info, TaggedNode(tag, data_object), self) + return self.finders[type(data_object)]( + self.project_info, TaggedNode(tag, data_object), self + ) class Finder: diff --git a/breathe/finder/index.py b/breathe/finder/index.py index dde3e448..8170019e 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -63,9 +63,10 @@ def filter_(self, ancestors: list[TaggedNode], filter_: DoxFilter, matches) -> N mem = member_stack[0].value assert isinstance(mem, parser.Node_MemberType) refid = mem.refid + def ref_filter(nstack): node = nstack.node - return isinstance(node,parser.Node_memberdefType) and node.id == refid + return isinstance(node, parser.Node_memberdefType) and node.id == refid finder.filter_(node_stack, ref_filter, matches) else: diff --git a/breathe/parser.py b/breathe/parser.py index 5609f5aa..238dea36 100644 --- a/breathe/parser.py +++ b/breathe/parser.py @@ -1,3 +1,5 @@ +# flake8: noqa + from __future__ import annotations import reprlib @@ -13,48 +15,62 @@ if TYPE_CHECKING: NodeOrValue = Node | str | None + @reprlib.recursive_repr() def node_repr(self: Node) -> str: # pragma: no cover cls = type(self) fields = [] - if isinstance(self,FrozenList): - pos = ', '.join(map(repr,self)) - fields.append(f'[{pos}]') - fields.extend(f'{field}={getattr(self,field)!r}' for field in cls._fields) - inner = ', '.join(fields) - return f'{cls.__name__}({inner})' -Node.__repr__ = node_repr # type: ignore + if isinstance(self, FrozenList): + pos = ", ".join(map(repr, self)) + fields.append(f"[{pos}]") + fields.extend(f"{field}={getattr(self,field)!r}" for field in cls._fields) + inner = ", ".join(fields) + return f"{cls.__name__}({inner})" + + +Node.__repr__ = node_repr # type: ignore + @reprlib.recursive_repr() def taggedvalue_repr(self: TaggedValue) -> str: # pragma: no cover - return f'{self.__class__.__name__}({self.name!r}, {self.value!r})' -TaggedValue.__repr__ = taggedvalue_repr # type: ignore + return f"{self.__class__.__name__}({self.name!r}, {self.value!r})" + + +TaggedValue.__repr__ = taggedvalue_repr # type: ignore + @reprlib.recursive_repr() def frozenlist_repr(self: FrozenList) -> str: # pragma: no cover - inner = ', '.join(map(repr,self)) - return f'{self.__class__.__name__}([{inner}])' -FrozenList.__repr__ = frozenlist_repr # type: ignore + inner = ", ".join(map(repr, self)) + return f"{self.__class__.__name__}([{inner}])" + + +FrozenList.__repr__ = frozenlist_repr # type: ignore + def description_has_content(node: Node_descriptionType | None) -> bool: - if node is None: return False - if bool(node.title) or len(node) > 1: return True - if not len(node): return False + if node is None: + return False + if bool(node.title) or len(node) > 1: + return True + if not len(node): + return False item = node[0] - return not isinstance(item,str) or (len(item) > 0 and not item.isspace()) + return not isinstance(item, str) or (len(item) > 0 and not item.isspace()) + class ParserError(RuntimeError): def __init__(self, message: str, filename: str, lineno: int | None = None): - super().__init__(message,lineno,filename) + super().__init__(message, lineno, filename) @property def message(self) -> str: return self.args[0] - + @property def lineno(self) -> int | None: return self.args[1] - + @property def filename(self) -> str: return self.args[2] @@ -77,18 +93,22 @@ class Parser: def __init__(self, app: Sphinx, 
cache: dict[str, Node_DoxygenTypeIndex | Node_DoxygenType]): self.app = app self.cache = cache - - def _parse_common(self,filename: str, right_tag: str) -> Node_DoxygenTypeIndex | Node_DoxygenType: + + def _parse_common( + self, filename: str, right_tag: str + ) -> Node_DoxygenTypeIndex | Node_DoxygenType: try: # Try to get from our cache return self.cache[filename] except KeyError: # If that fails, parse it afresh try: - with open(filename,'rb') as file: + with open(filename, "rb") as file: result = parse_file(file) if result.name != right_tag: - raise ParserError(f'expected "{right_tag}" root element, not "{result.name}"',filename) + raise ParserError( + f'expected "{right_tag}" root element, not "{result.name}"', filename + ) self.cache[filename] = result.value return result.value except ParseError as e: @@ -102,29 +122,26 @@ def parse(self, project_info: ProjectInfo) -> Node_DoxygenTypeIndex: filename = path_handler.resolve_path(self.app, project_info.project_path(), "index.xml") file_state_cache.update(self.app, filename) - r = self._parse_common(filename, 'doxygenindex') - assert isinstance(r,Node_DoxygenTypeIndex) + r = self._parse_common(filename, "doxygenindex") + assert isinstance(r, Node_DoxygenTypeIndex) return r class DoxygenCompoundParser(Parser): - def __init__(self, app: Sphinx, cache, - project_info: ProjectInfo) -> None: + def __init__(self, app: Sphinx, cache, project_info: ProjectInfo) -> None: super().__init__(app, cache) self.project_info = project_info def parse(self, refid: str) -> Node_DoxygenType: filename = path_handler.resolve_path( - self.app, - self.project_info.project_path(), - f"{refid}.xml" + self.app, self.project_info.project_path(), f"{refid}.xml" ) file_state_cache.update(self.app, filename) - r = self._parse_common(filename, 'doxygen') - assert isinstance(r,Node_DoxygenType) + r = self._parse_common(filename, "doxygen") + assert isinstance(r, Node_DoxygenType) return r @@ -141,14 +158,21 @@ def create_compound_parser(self, project_info: ProjectInfo) -> DoxygenCompoundPa @overload -def tag_name_value(x: TaggedValue[T, U]) -> tuple[T, U]: ... +def tag_name_value(x: TaggedValue[T, U]) -> tuple[T, U]: + ... + @overload -def tag_name_value(x: str) -> tuple[None,str]: ... +def tag_name_value(x: str) -> tuple[None, str]: + ... + @overload -def tag_name_value(x: TaggedValue[T, U] | str) -> tuple[T | None, U | str]: ... +def tag_name_value(x: TaggedValue[T, U] | str) -> tuple[T | None, U | str]: + ... + def tag_name_value(x): - if isinstance(x,str): return None,x - return x.name,x.value + if isinstance(x, str): + return None, x + return x.name, x.value diff --git a/breathe/project.py b/breathe/project.py index 6cd2a463..bfd700ec 100644 --- a/breathe/project.py +++ b/breathe/project.py @@ -13,14 +13,13 @@ if TYPE_CHECKING: import sys + if sys.version_info >= (3, 11): from typing import TypedDict else: from typing_extensions import TypedDict - ProjectOptions = TypedDict('ProjectOptions',{ - 'path': str, - 'project': str}) + ProjectOptions = TypedDict("ProjectOptions", {"path": str, "project": str}) class ProjectError(BreatheError): @@ -120,6 +119,8 @@ def domain_for_file(self, file_: str) -> str: class ProjectInfoFactory: + _default_build_dir: str + def __init__(self, app: Sphinx): self.app = app # note: don't access self.app.config now, as we are instantiated at setup-time. 
diff --git a/breathe/renderer/__init__.py b/breathe/renderer/__init__.py index 9b5fe5aa..9faacee3 100644 --- a/breathe/renderer/__init__.py +++ b/breathe/renderer/__init__.py @@ -10,12 +10,14 @@ from breathe.directives.index import RootDataObject DataObject = Union[parser.NodeOrValue, RootDataObject] - T_data_object = TypeVar('T_data_object', bound=DataObject, covariant = True) + T_data_object = TypeVar("T_data_object", bound=DataObject, covariant=True) else: - T_data_object = TypeVar('T_data_object', covariant = True) + T_data_object = TypeVar("T_data_object", covariant=True) -def format_parser_error(name: str, error: str, filename: str, state, lineno: int, do_unicode_warning: bool = False) -> list[nodes.Node]: +def format_parser_error( + name: str, error: str, filename: str, state, lineno: int, do_unicode_warning: bool = False +) -> list[nodes.Node]: warning = '%s: Unable to parse xml file "%s". ' % (name, filename) explanation = "Reported error: %s. " % error @@ -39,7 +41,7 @@ def format_parser_error(name: str, error: str, filename: str, state, lineno: int "", nodes.paragraph("", "", nodes.Text(warning)), nodes.paragraph("", "", nodes.Text(explanation)), - *unicode_explanation + *unicode_explanation, ), state.document.reporter.warning( warning + explanation + unicode_explanation_text, line=lineno @@ -51,9 +53,15 @@ class TaggedNode(NamedTuple, Generic[T_data_object]): tag: str | None value: T_data_object + class RenderContext: def __init__( - self, node_stack: list[TaggedNode], mask_factory: mask.MaskFactoryBase, directive_args, domain: str = "", child: bool = False + self, + node_stack: list[TaggedNode], + mask_factory: mask.MaskFactoryBase, + directive_args, + domain: str = "", + child: bool = False, ) -> None: self.node_stack = node_stack self.mask_factory = mask_factory @@ -61,7 +69,9 @@ def __init__( self.domain = domain self.child = child - def create_child_context(self, data_object: parser.NodeOrValue, tag: str | None = None) -> RenderContext: + def create_child_context( + self, data_object: parser.NodeOrValue, tag: str | None = None + ) -> RenderContext: node_stack = self.node_stack[:] - node_stack.insert(0, TaggedNode(tag,self.mask_factory.mask(data_object))) + node_stack.insert(0, TaggedNode(tag, self.mask_factory.mask(data_object))) return RenderContext(node_stack, self.mask_factory, self.directive_args, self.domain, True) diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 227669c1..65ef0dfe 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -328,7 +328,8 @@ def filter(nstack: NodeStack) -> bool: # Allow anything that isn't a Node_memberdefType, or if it is only # allow the ones with a description return (not isinstance(node, parser.Node_memberdefType)) or bool( - parser.description_has_content(node.briefdescription) or parser.description_has_content(node.detaileddescription) + parser.description_has_content(node.briefdescription) + or parser.description_has_content(node.detaileddescription) ) return filter @@ -352,7 +353,7 @@ def create_class_member_filter(cls, options: DoxNamespaceOptions) -> DoxFilter: undoc_members = cls._create_undoc_members_filter(options) - prot_filter = () + prot_filter: tuple[parser.DoxProtectionKind, ...] 
= () if "protected-members" in options: prot_filter += (parser.DoxProtectionKind.protected,) if "private-members" in options: diff --git a/breathe/renderer/mask.py b/breathe/renderer/mask.py index 913b7fc6..1680087a 100644 --- a/breathe/renderer/mask.py +++ b/breathe/renderer/mask.py @@ -25,16 +25,16 @@ def no_parameter_names(node: parser.NodeOrValue): - assert isinstance(node,parser.Node_paramType) + assert isinstance(node, parser.Node_paramType) return parser.Node_paramType( - array = node.array, - attributes = node.attributes, - briefdescription = node.briefdescription, - declname = None, - defname = None, - defval = None, - type = node.type, - typeconstraint = node.typeconstraint + array=node.array, + attributes=node.attributes, + briefdescription=node.briefdescription, + declname=None, + defname=None, + defval=None, + type=node.type, + typeconstraint=node.typeconstraint, ) @@ -44,12 +44,16 @@ def mask(self, data_object): class MaskFactory(MaskFactoryBase): - def __init__(self, lookup : dict[type[parser.NodeOrValue],Callable[[parser.NodeOrValue],parser.NodeOrValue]]): + def __init__( + self, + lookup: dict[type[parser.NodeOrValue], Callable[[parser.NodeOrValue], parser.NodeOrValue]], + ): self.lookup = lookup def mask(self, data_object: parser.NodeOrValue) -> parser.NodeOrValue: m = self.lookup.get(type(data_object)) - if m is None: return data_object + if m is None: + return data_object return m(data_object) diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index c4999374..e399fb3d 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -1110,8 +1110,13 @@ def description(self, node: HasDescriptions) -> list[Node]: "para", parser.Node_docParaType( [ - parser.TaggedValue[Literal["parameterlist"], parser.Node_docParamListType]( - "parameterlist", parser.Node_docParamListType(params, kind=parser.DoxParamListKind.param) + parser.TaggedValue[ + Literal["parameterlist"], parser.Node_docParamListType + ]( + "parameterlist", + parser.Node_docParamListType( + params, kind=parser.DoxParamListKind.param + ), ) ] ), @@ -2803,6 +2808,7 @@ def visit_docdotfile(self, node: parser.Node_docImageFileType) -> list[Node]: graph_node["options"] = {"docname": dot_file_path} caption = "" if len(node) == 0 else parser.tag_name_value(node[0])[1] if caption: + assert isinstance(caption, str) caption_node = nodes.caption(caption, "") caption_node += nodes.Text(caption) return [nodes.figure("", graph_node, caption_node)] @@ -2822,7 +2828,10 @@ def visit_docgraph(self, tag: str, node: parser.Node_graphType) -> list[Node]: caption = f"Include dependency graph for {parent.compoundname}:" elif tag == "invincdepgraph": direction = "back" - caption = f"This graph shows which files directly or indirectly include {parent.compoundname}:" + caption = ( + "This graph shows which files directly or indirectly " + + f" include {parent.compoundname}:" + ) elif tag == "inheritancegraph": caption = f"Inheritance diagram for {parent.compoundname}:" else: diff --git a/documentation/source/conf.py b/documentation/source/conf.py index c1cf8c34..288306e9 100644 --- a/documentation/source/conf.py +++ b/documentation/source/conf.py @@ -60,7 +60,6 @@ git_tag = git_tag.decode("ascii") if travis_build: - # Don't attempt to set the path as breathe is installed to virtualenv on travis # Set values with simple strings @@ -69,7 +68,6 @@ documentation_build = "travis" elif read_the_docs_build: - # On RTD we'll be in the 'source' directory sys.path.append("../../") @@ -96,7 
+94,6 @@ documentation_build = "readthedocs_latest" else: - # For our usual dev build we'll be in the 'documentation' directory but Sphinx seems to set the # current working directory to 'source' so we append relative to that sys.path.append("../../") @@ -396,7 +393,6 @@ def generate_doxygen_xml(app): read_the_docs_build = os.environ.get("READTHEDOCS", None) == "True" if read_the_docs_build: - # Attempt to build the doxygen files on the RTD server. Explicitly override the path/name used # for executing doxygen to simply be 'doxygen' to stop the makefiles looking for the executable. # This is because the `which doxygen` effort seemed to fail when tested on the RTD server. @@ -406,7 +402,6 @@ def generate_doxygen_xml(app): def setup(app): - # Approach borrowed from the Sphinx docs app.add_object_type( "confval", diff --git a/pyproject.toml b/pyproject.toml index 9880c562..283e1644 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,5 +4,5 @@ requires = ["setuptools", "jinja2", "perfect-hash"] [tool.black] line-length = 100 extend-exclude = ''' -^/breathe/parser/.* +^/examples/.* | ^/tests/data/.* ''' diff --git a/setup.py b/setup.py index 855ed11a..7f7dd29c 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ # add xml_parser_generator to the import path list base_dir = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0,os.path.join(base_dir,'xml_parser_generator')) +sys.path.insert(0, os.path.join(base_dir, "xml_parser_generator")) import make_parser @@ -38,37 +38,38 @@ extra_user_options = [ - ('cpp-opts=',None, - 'extra command line arguments for the compiler'), - ('ld-opts=',None, - 'extra command line arguments for the linker')] + ("cpp-opts=", None, "extra command line arguments for the compiler"), + ("ld-opts=", None, "extra command line arguments for the linker"), +] + class CustomBuild(build): - """Add extra parameters for 'build' to pass to 'build_ext' - """ + """Add extra parameters for 'build' to pass to 'build_ext'""" + user_options = build.user_options + extra_user_options def initialize_options(self): super().initialize_options() - self.cpp_opts = '' - self.ld_opts = '' + self.cpp_opts = "" + self.ld_opts = "" def finalize_options(self): super().finalize_options() self.cpp_opts = split_quoted(self.cpp_opts) self.ld_opts = split_quoted(self.ld_opts) + class CustomBuildExt(build_ext): """Extend build_ext to automatically generate _parser.c""" user_options = build_ext.user_options + extra_user_options - SCHEMA_FILE = os.path.join('xml_parser_generator','schema.json') - MODULE_TEMPLATE = os.path.join('xml_parser_generator','module_template.c') - STUBS_TEMPLATE = os.path.join('xml_parser_generator','stubs_template.pyi') - MAKER_SOURCE = os.path.join('xml_parser_generator','make_parser.py') + SCHEMA_FILE = os.path.join("xml_parser_generator", "schema.json") + MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.c.in") + STUBS_TEMPLATE = os.path.join("xml_parser_generator", "stubs_template.pyi.in") + MAKER_SOURCE = os.path.join("xml_parser_generator", "make_parser.py") - DEPENDENCIES = [SCHEMA_FILE,MODULE_TEMPLATE,STUBS_TEMPLATE,MAKER_SOURCE] + DEPENDENCIES = [SCHEMA_FILE, MODULE_TEMPLATE, STUBS_TEMPLATE, MAKER_SOURCE] def initialize_options(self): super().initialize_options() @@ -81,9 +82,7 @@ def finalize_options(self): if self.ld_opts is not None: self.ld_opts = split_quoted(self.ld_opts) - self.set_undefined_options('build', - ('cpp_opts','cpp_opts'), - ('ld_opts','ld_opts')) + self.set_undefined_options("build", ("cpp_opts", "cpp_opts"), 
("ld_opts", "ld_opts")) super().finalize_options() def build_extensions(self): @@ -94,40 +93,40 @@ def build_extensions(self): # mostly depend on file read and memory allocation speed. Thus it's # better to optimize for size. c = self.compiler.compiler_type - if c == 'msvc': - self.extensions[0].extra_compile_args = ['/O1'] - elif c in {'unix','cygwin','mingw32'}: - self.extensions[0].extra_compile_args = ['-Os'] - self.extensions[0].extra_link_args = ['-s'] + if c == "msvc": + self.extensions[0].extra_compile_args = ["/O1"] + elif c in {"unix", "cygwin", "mingw32"}: + self.extensions[0].extra_compile_args = ["-Os"] + self.extensions[0].extra_link_args = ["-s"] - source = os.path.join(self.build_temp,self.extensions[0].name+'.c') + source = os.path.join(self.build_temp, self.extensions[0].name + ".c") # put the stub file in the same place that the extension module will be ext_dest = self.get_ext_fullpath(self.extensions[0].name) libdir = os.path.dirname(ext_dest) - stub = os.path.join(libdir,self.extensions[0].name+'.pyi') + stub = os.path.join(libdir, self.extensions[0].name + ".pyi") - mkpath(self.build_temp,dry_run=self.dry_run) - mkpath(libdir,dry_run=self.dry_run) + mkpath(self.build_temp, dry_run=self.dry_run) + mkpath(libdir, dry_run=self.dry_run) - if (self.force - or newer_group(self.DEPENDENCIES,source) - or newer_group(self.DEPENDENCIES,stub)): + if ( + self.force + or newer_group(self.DEPENDENCIES, source) + or newer_group(self.DEPENDENCIES, stub) + ): log.info(f'generating "{source}" and "{stub}" from templates') if not self.dry_run: make_parser.generate_from_json( - self.SCHEMA_FILE, - self.MODULE_TEMPLATE, - self.STUBS_TEMPLATE, - source, - stub) + self.SCHEMA_FILE, self.MODULE_TEMPLATE, self.STUBS_TEMPLATE, source, stub + ) else: log.debug(f'"{source}" and "{stub}" are up-to-date') - + self.extensions[0].sources.append(source) super().build_extensions() + setup( name="breathe", version=__version__, @@ -155,19 +154,19 @@ def build_extensions(self): ], platforms="any", packages=find_packages(), - ext_package='breathe', + ext_package="breathe", ext_modules=[ Extension( - '_parser', - [], # source is generated by CustomBuildExt + "_parser", + [], # source is generated by CustomBuildExt depends=CustomBuildExt.DEPENDENCIES, - libraries=['expat'], + libraries=["expat"], define_macros=[ - ('PARSER_PY_LIMITED_API','0x03080000'), # set Stable ABI version to 3.8 - ('MODULE_NAME','_parser'), - ('FULL_MODULE_STR','"breathe._parser"') + ("PARSER_PY_LIMITED_API", "0x03080000"), # set Stable ABI version to 3.8 + ("MODULE_NAME", "_parser"), + ("FULL_MODULE_STR", '"breathe._parser"'), ], - py_limited_api=True + py_limited_api=True, ) ], include_package_data=True, @@ -177,5 +176,5 @@ def build_extensions(self): ], }, install_requires=requires, - cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt} + cmdclass={"build": CustomBuild, "build_ext": CustomBuildExt}, ) diff --git a/tests/conftest.py b/tests/conftest.py index b620dca5..c8ea8403 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,6 +8,7 @@ rootdir, ) + @pytest.fixture(scope="function") def app(test_params, app_params, make_app, shared_result): """ @@ -15,7 +16,7 @@ def app(test_params, app_params, make_app, shared_result): """ args, kwargs = app_params assert "srcdir" in kwargs - kwargs["srcdir"].mkdir(parents=True,exist_ok=True) + kwargs["srcdir"].mkdir(parents=True, exist_ok=True) (kwargs["srcdir"] / "conf.py").write_text("") app_ = make_app(*args, **kwargs) yield app_ @@ -28,4 +29,4 @@ def app(test_params, 
app_params, make_app, shared_result): print("# warning:", "\n" + app_._warning.getvalue()) if test_params["shared_result"]: - shared_result.store(test_params["shared_result"], app_) \ No newline at end of file + shared_result.store(test_params["shared_result"], app_) diff --git a/tests/data/examples/test_python/compare.xml b/tests/data/examples/test_python/compare.xml new file mode 100644 index 00000000..dbb64780 --- /dev/null +++ b/tests/data/examples/test_python/compare.xml @@ -0,0 +1,76 @@ + + + + + + module pyexample + + Documentation for this module. + More details. + + Functions + + + func + + Documentation for a function. + More details. + + + + + + class PyClass + + Documentation for a class. + More details. + + Public Functions + + + __init__self + + The constructor. + + + + + PyMethodself + + Documentation for a method. + + + Parameters + + self – The object pointer. + + + + + + + + Public Static Attributes + + + classVar = 0 + + A class variable. + + + + + Protected Attributes + + + _memVar + + a member variable + + + + + + + + diff --git a/tests/data/examples/test_python/input.rst b/tests/data/examples/test_python/input.rst new file mode 100644 index 00000000..52124a2d --- /dev/null +++ b/tests/data/examples/test_python/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: pyexample.py diff --git a/tests/data/examples/test_python/pyexample.py b/tests/data/examples/test_python/pyexample.py new file mode 100644 index 00000000..34c09925 --- /dev/null +++ b/tests/data/examples/test_python/pyexample.py @@ -0,0 +1,31 @@ +## @package pyexample +# Documentation for this module. +# +# More details. + +## Documentation for a function. +# +# More details. +def func(): + pass + + +## Documentation for a class. +# +# More details. +class PyClass: + + ## The constructor. + def __init__(self): + self._memVar = 0 + + ## Documentation for a method. + # @param self The object pointer. + def PyMethod(self): + pass + + ## A class variable. + classVar = 0 + + ## @var _memVar + # a member variable diff --git a/tests/test_examples.py b/tests/test_examples.py index d3b37828..afcbc346 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -25,18 +25,19 @@ ALIASES += "inlinerst=\\verbatim embed:rst:inline" """ -C_FILE_SUFFIXES = frozenset(('.h', '.c', '.hpp', '.cpp')) +C_FILE_SUFFIXES = frozenset((".h", ".c", ".hpp", ".cpp")) IGNORED_ELEMENTS = frozenset(()) BUFFER_SIZE = 0x1000 -TEST_DATA_DIR = pathlib.Path(__file__).parent / 'data' +TEST_DATA_DIR = pathlib.Path(__file__).parent / "data" DEFAULT_CONF = { - 'project': 'test', - 'breathe_default_project': 'example', - 'breathe_show_include': False, - 'extensions': ['breathe','sphinx.ext.graphviz']} + "project": "test", + "breathe_default_project": "example", + "breathe_show_include": False, + "extensions": ["breathe", "sphinx.ext.graphviz"], +} class XMLEventType(enum.Enum): @@ -44,18 +45,21 @@ class XMLEventType(enum.Enum): E_END = enum.auto() E_TEXT = enum.auto() + @dataclasses.dataclass class XMLElement: name: str - attr: dict[str,str] + attr: dict[str, str] line_no: int column_no: int + @dataclasses.dataclass class XMLElementEnd: line_no: int column_no: int + @dataclasses.dataclass class XMLTextElement: value: str @@ -65,60 +69,60 @@ class XMLTextElement: def xml_stream(infile): """XML pull parser. 
- + This is similar to xml.dom.pulldom.parse, except the locations of the elements are tracked.""" p = expat.ParserCreate() pending_events = [] - pending_text = '' + pending_text = "" def dispatch_text(): nonlocal pending_text if pending_text: - pending_events.append(( - XMLEventType.E_TEXT, - XMLTextElement( - pending_text, - p.CurrentLineNumber, - p.CurrentColumnNumber))) - pending_text = '' - - def handle_start(name,attr): + pending_events.append( + ( + XMLEventType.E_TEXT, + XMLTextElement(pending_text, p.CurrentLineNumber, p.CurrentColumnNumber), + ) + ) + pending_text = "" + + def handle_start(name, attr): dispatch_text() - pending_events.append(( - XMLEventType.E_START, - XMLElement( - name, - attr, - p.CurrentLineNumber, - p.CurrentColumnNumber))) + pending_events.append( + ( + XMLEventType.E_START, + XMLElement(name, attr, p.CurrentLineNumber, p.CurrentColumnNumber), + ) + ) + p.StartElementHandler = handle_start def handle_end(_): dispatch_text() - pending_events.append(( - XMLEventType.E_END, - XMLElementEnd( - p.CurrentLineNumber, - p.CurrentColumnNumber))) + pending_events.append( + (XMLEventType.E_END, XMLElementEnd(p.CurrentLineNumber, p.CurrentColumnNumber)) + ) + p.EndElementHandler = handle_end def handle_text(data): nonlocal pending_text pending_text += data + p.CharacterDataHandler = handle_text while True: data = infile.read(BUFFER_SIZE) if not data: dispatch_text() - p.Parse(data,True) + p.Parse(data, True) yield from pending_events break - p.Parse(data,False) + p.Parse(data, False) if pending_events: yield from pending_events pending_events.clear() @@ -127,6 +131,7 @@ def handle_text(data): def get_individual_tests(): return (TEST_DATA_DIR / "examples").glob("test_*") + def filtered_xml(infile): ignore = 0 for event, node in xml_stream(infile): @@ -147,72 +152,90 @@ def filtered_xml(infile): node.value = text yield event, node + def conf_overrides(extra): conf = DEFAULT_CONF.copy() conf.update(extra) return conf + def compare_xml(generated, model): event_str = { - XMLEventType.E_START: 'element start', - XMLEventType.E_END: 'element end', - XMLEventType.E_TEXT: 'text' + XMLEventType.E_START: "element start", + XMLEventType.E_END: "element end", + XMLEventType.E_TEXT: "text", } with open(generated) as o_file, open(model) as c_file: - for o, c in zip(filtered_xml(o_file),filtered_xml(c_file)): + for o, c in zip(filtered_xml(o_file), filtered_xml(c_file)): o_type, o_node = o c_type, c_node = c - assert o_type == c_type, f'at line {o_node.line_no}: found {event_str[o_type]} when expecting {event_str[c_type]}' + assert ( + o_type == c_type + ), f"at line {o_node.line_no}: found {event_str[o_type]} when expecting {event_str[c_type]}" if o_type == XMLEventType.E_START: - assert o_node.name == c_node.name, f'wrong tag at line {o_node.line_no}: expected {c_node.name}, found {o_node.name}' + assert ( + o_node.name == c_node.name + ), f"wrong tag at line {o_node.line_no}: expected {c_node.name}, found {o_node.name}" # ignore extra attributes in o_node for key, value in c_node.attr.items(): - assert key in o_node.attr, f'missing attribute at line {o_node.line_no}: {key}' + assert key in o_node.attr, f"missing attribute at line {o_node.line_no}: {key}" o_value = o_node.attr[key] - assert o_value == value, f'wrong value for attribute "{key}" at line {o_node.line_no}: expected "{value}", found "{o_value}"' + assert ( + o_value == value + ), f'wrong value for attribute "{key}" at line {o_node.line_no}: expected "{value}", found "{o_value}"' elif o_type == XMLEventType.E_TEXT: - 
assert o_node.value == c_node.value, f'wrong content at line {o_node.line_no}: expected "{c_node.value}", found "{o_node.value}"' + assert ( + o_node.value == c_node.value + ), f'wrong content at line {o_node.line_no}: expected "{c_node.value}", found "{o_node.value}"' + -@pytest.mark.parametrize('test_input', get_individual_tests()) +@pytest.mark.parametrize("test_input", get_individual_tests()) def test_example(make_app, tmp_path, test_input, monkeypatch): monkeypatch.chdir(test_input) - doxygen = shutil.which('doxygen') + doxygen = shutil.which("doxygen") if doxygen is None: - raise ValueError('cannot find doxygen executable') + raise ValueError("cannot find doxygen executable") doxyfile = tmp_path / "Doxyfile" doxycontent = DOXYFILE_TEMPLATE.format(output=tmp_path) - extra_opts = test_input / 'extra_dox_opts.txt' + extra_opts = test_input / "extra_dox_opts.txt" if extra_opts.exists(): doxycontent += extra_opts.read_text() doxyfile.write_text(doxycontent) (tmp_path / "conf.py").touch() shutil.copyfile(test_input / "input.rst", tmp_path / "index.rst") - subprocess.run([doxygen, doxyfile], check = True) + subprocess.run([doxygen, doxyfile], check=True) make_app( - buildername='xml', + buildername="xml", srcdir=tmp_path, - confoverrides=conf_overrides({ - 'breathe_projects': {'example': str(tmp_path / "xml")}})).build() + confoverrides=conf_overrides({"breathe_projects": {"example": str(tmp_path / "xml")}}), + ).build() + + compare_xml(tmp_path / "_build" / "xml" / "index.xml", test_input / "compare.xml") - compare_xml(tmp_path / '_build' / 'xml' / 'index.xml', test_input / 'compare.xml') def test_auto(make_app, tmp_path, monkeypatch): - test_input = TEST_DATA_DIR / 'auto' + test_input = TEST_DATA_DIR / "auto" monkeypatch.chdir(test_input) (tmp_path / "conf.py").touch() shutil.copyfile(test_input / "input.rst", tmp_path / "index.rst") make_app( - buildername='xml', + buildername="xml", srcdir=tmp_path, - confoverrides=conf_overrides({ - 'breathe_projects_source': {'example': (test_input, ['auto_class.h', 'auto_function.h'])}})).build() - - compare_xml(tmp_path / '_build' / 'xml' / 'index.xml', test_input / 'compare.xml') + confoverrides=conf_overrides( + { + "breathe_projects_source": { + "example": (test_input, ["auto_class.h", "auto_function.h"]) + } + } + ), + ).build() + + compare_xml(tmp_path / "_build" / "xml" / "index.xml", test_input / "compare.xml") diff --git a/tests/test_filters.py b/tests/test_filters.py index 08b8e55e..580cef28 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -9,17 +9,15 @@ from breathe.renderer.filter import FilterFactory, NodeStack -DEFAULT_OPTS = opts = { - 'path': '', - 'project': '', - 'membergroups': '', - 'show': ''} +DEFAULT_OPTS = opts = {"path": "", "project": "", "membergroups": "", "show": ""} + @pytest.fixture(scope="module") def class_doc(): with open(os.path.join(os.path.dirname(__file__), "data", "classSample.xml"), "rb") as fid: return parser.parse_file(fid).value + class SampleMembers(NamedTuple): public_field: NodeStack public_method: NodeStack @@ -28,37 +26,39 @@ class SampleMembers(NamedTuple): private_field: NodeStack private_method: NodeStack + @pytest.fixture def members(class_doc): - common = [ - TaggedNode(None, class_doc.compounddef[0]), - TaggedNode(None, class_doc) - ] + common = [TaggedNode(None, class_doc.compounddef[0]), TaggedNode(None, class_doc)] memberdefs = {} for sect in class_doc.compounddef[0].sectiondef: member = sect.memberdef[0] - memberdefs[member.name] = NodeStack([TaggedNode(None, member), 
TaggedNode(None, sect)] + common) + memberdefs[member.name] = NodeStack( + [TaggedNode(None, member), TaggedNode(None, sect)] + common + ) return SampleMembers( - memberdefs['public_field'], - memberdefs['public_method'], - memberdefs['protected_field'], - memberdefs['protected_method'], - memberdefs['private_field'], - memberdefs['private_method'] + memberdefs["public_field"], + memberdefs["public_method"], + memberdefs["protected_field"], + memberdefs["protected_method"], + memberdefs["private_field"], + memberdefs["private_method"], ) + def create_class_filter(app, extra_ops): opts = DEFAULT_OPTS.copy() opts.update(extra_ops) - return FilterFactory(app).create_class_filter('Sample', opts) + return FilterFactory(app).create_class_filter("Sample", opts) + def test_members(app, members): app.config.breathe_default_members = [] - filter = create_class_filter(app,{}) + filter = create_class_filter(app, {}) assert not filter(members.public_field) assert not filter(members.public_method) @@ -67,21 +67,27 @@ def test_members(app, members): assert not filter(members.private_field) assert not filter(members.private_method) + bools = (True, False) -@pytest.mark.parametrize('public', bools) -@pytest.mark.parametrize('private', bools) -@pytest.mark.parametrize('protected', bools) -@pytest.mark.parametrize('undocumented', bools) + +@pytest.mark.parametrize("public", bools) +@pytest.mark.parametrize("private", bools) +@pytest.mark.parametrize("protected", bools) +@pytest.mark.parametrize("undocumented", bools) def test_public_class_members(app, members, public, private, protected, undocumented): app.config.breathe_default_members = [] opts = {} - if public: opts['members'] = None - if private: opts['private-members'] = None - if protected: opts['protected-members'] = None - if undocumented: opts['undoc-members'] = None - filter = create_class_filter(app,opts) + if public: + opts["members"] = None + if private: + opts["private-members"] = None + if protected: + opts["protected-members"] = None + if undocumented: + opts["undoc-members"] = None + filter = create_class_filter(app, opts) assert filter(members.public_field) == public assert filter(members.public_method) == (public and undocumented) @@ -90,12 +96,13 @@ def test_public_class_members(app, members, public, private, protected, undocume assert filter(members.private_field) == private assert filter(members.private_method) == (private and undocumented) + def test_specific_class_members(app, members): app.config.breathe_default_members = [] - filter = create_class_filter(app,{ - 'members': 'public_method,protected_method,private_field', - 'undoc-members': None}) + filter = create_class_filter( + app, {"members": "public_method,protected_method,private_field", "undoc-members": None} + ) assert not filter(members.public_field) assert filter(members.public_method) @@ -104,21 +111,28 @@ def test_specific_class_members(app, members): assert filter(members.private_field) assert not filter(members.private_method) + def test_nested_class_filtered(app): app.config.breathe_default_members = [] - doc = parser.parse_str(""" + doc = parser.parse_str( + """ sample.hpp Sample Sample::Inner - """) + """ + ) compounddef = doc.value.compounddef[0] ref_outer, ref_inner = compounddef.innerclass - filter = FilterFactory(app).create_file_filter('sample.hpp', DEFAULT_OPTS, init_valid_names=('Sample','Sample::Inner')) - assert filter(NodeStack([TaggedNode('innerclass',ref_outer), TaggedNode(None, compounddef)])) - assert not 
filter(NodeStack([TaggedNode('innerclass',ref_inner), TaggedNode(None, compounddef)])) + filter = FilterFactory(app).create_file_filter( + "sample.hpp", DEFAULT_OPTS, init_valid_names=("Sample", "Sample::Inner") + ) + assert filter(NodeStack([TaggedNode("innerclass", ref_outer), TaggedNode(None, compounddef)])) + assert not filter( + NodeStack([TaggedNode("innerclass", ref_inner), TaggedNode(None, compounddef)]) + ) diff --git a/tests/test_parser.py b/tests/test_parser.py index 78a3272d..0197d6f9 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -1,6 +1,7 @@ import pytest from breathe import parser + def test_bad_content(): xml = """ @@ -25,6 +26,7 @@ def test_bad_content(): parser.parse_str(xml) assert exc.value.lineno == 6 + def test_malformed(): xml = """ @@ -38,6 +40,7 @@ def test_malformed(): with pytest.raises(parser.ParseError): parser.parse_str(xml) + def test_unknown_tag(): xml = """ @@ -64,4 +67,4 @@ def test_unknown_tag(): with pytest.warns(parser.ParseWarning) as record: parser.parse_str(xml) assert len(record) == 1 - assert 'Warning on line 5:' in str(record[0].message) + assert "Warning on line 5:" in str(record[0].message) diff --git a/tests/test_renderer.py b/tests/test_renderer.py index 6b3846ab..5eb9abe1 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -1,5 +1,4 @@ import os -import pytest import sphinx.locale import sphinx.addnodes @@ -12,10 +11,10 @@ sphinx.locale.init([], "") COMMON_ARGS_memberdefType = { - 'id': '', - 'prot': parser.DoxProtectionKind.public, - 'static': False, - 'location': parser.Node_locationType(file = '', line = 0) + "id": "", + "prot": parser.DoxProtectionKind.public, + "static": False, + "location": parser.Node_locationType(file="", line=0), } @@ -222,7 +221,7 @@ def render( compound_parser, (lambda nstack: True), ) - r.context = MockContext(app, [renderer.TaggedNode(None,member_def)], domain, options) + r.context = MockContext(app, [renderer.TaggedNode(None, member_def)], domain, options) return r.render(member_def) @@ -234,9 +233,7 @@ def test_render_func(app): name="foo", argsstring="(int)", virt=parser.DoxVirtualKind.non_virtual, - param=[ - parser.Node_paramType(type=parser.Node_linkedTextType(["int"])) - ], + param=[parser.Node_paramType(type=parser.Node_linkedTextType(["int"]))], **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") @@ -260,7 +257,11 @@ def test_render_func(app): def test_render_typedef(app): member_def = parser.Node_memberdefType( - kind=parser.DoxMemberKind.typedef, definition="typedef int foo", type=parser.Node_linkedTextType(["int"]), name="foo", **COMMON_ARGS_memberdefType + kind=parser.DoxMemberKind.typedef, + definition="typedef int foo", + type=parser.Node_linkedTextType(["int"]), + name="foo", + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "typedef int foo" @@ -268,7 +269,11 @@ def test_render_typedef(app): def test_render_c_typedef(app): member_def = parser.Node_memberdefType( - kind=parser.DoxMemberKind.typedef, definition="typedef unsigned int bar", type=parser.Node_linkedTextType(["unsigned int"]), name="bar", **COMMON_ARGS_memberdefType + kind=parser.DoxMemberKind.typedef, + definition="typedef unsigned int bar", + type=parser.Node_linkedTextType(["unsigned int"]), + name="bar", + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def, domain="c"), "desc_signature") assert signature.astext() == "typedef unsigned int bar" @@ -298,7 +303,11 
@@ def test_render_c_function_typedef(app): def test_render_using_alias(app): member_def = parser.Node_memberdefType( - kind=parser.DoxMemberKind.typedef, definition="using foo = int", type=parser.Node_linkedTextType(["int"]), name="foo", **COMMON_ARGS_memberdefType + kind=parser.DoxMemberKind.typedef, + definition="using foo = int", + type=parser.Node_linkedTextType(["int"]), + name="foo", + **COMMON_ARGS_memberdefType ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "using foo = int" @@ -421,7 +430,9 @@ def test_render_define_initializer(app): def test_render_define_no_initializer(app): sphinx.addnodes.setup(app) - member_def = parser.Node_memberdefType(kind=parser.DoxMemberKind.define, name="USE_MILK", **COMMON_ARGS_memberdefType) + member_def = parser.Node_memberdefType( + kind=parser.DoxMemberKind.define, name="USE_MILK", **COMMON_ARGS_memberdefType + ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "USE_MILK" @@ -431,15 +442,22 @@ def test_render_innergroup(app): mock_compound_parser = MockCompoundParser( { refid: parser.Node_compounddefType( - kind=parser.DoxCompoundKind.group, compoundname="InnerGroup", briefdescription=parser.Node_descriptionType(["InnerGroup"]), - id='', prot=parser.DoxProtectionKind.public + kind=parser.DoxCompoundKind.group, + compoundname="InnerGroup", + briefdescription=parser.Node_descriptionType(["InnerGroup"]), + id="", + prot=parser.DoxProtectionKind.public, ) } ) ref = parser.Node_refType(["InnerGroup"], refid=refid) compound_def = parser.Node_compounddefType( - kind=parser.DoxCompoundKind.group, compoundname="OuterGroup", briefdescription=parser.Node_descriptionType(["OuterGroup"]), innergroup=[ref], - id='', prot=parser.DoxProtectionKind.public + kind=parser.DoxCompoundKind.group, + compoundname="OuterGroup", + briefdescription=parser.Node_descriptionType(["OuterGroup"]), + innergroup=[ref], + id="", + prot=parser.DoxProtectionKind.public, ) assert all( el.astext() != "InnerGroup" @@ -479,14 +497,18 @@ def get_directive(app): def get_matches(datafile) -> tuple[list[str], list[list[renderer.TaggedNode]]]: argsstrings = [] - with open(os.path.join(os.path.dirname(__file__), "data", datafile), 'rb') as fid: + with open(os.path.join(os.path.dirname(__file__), "data", datafile), "rb") as fid: doc = parser.parse_file(fid) - assert isinstance(doc.value,parser.Node_DoxygenType) + assert isinstance(doc.value, parser.Node_DoxygenType) sectiondef = doc.value.compounddef[0].sectiondef[0] for child in sectiondef.memberdef: - if child.argsstring: argsstrings.append(child.argsstring) - matches = [[renderer.TaggedNode(None, m), renderer.TaggedNode(None, sectiondef)] for m in sectiondef.memberdef] + if child.argsstring: + argsstrings.append(child.argsstring) + matches = [ + [renderer.TaggedNode(None, m), renderer.TaggedNode(None, sectiondef)] + for m in sectiondef.memberdef + ] return argsstrings, matches diff --git a/tests/test_utils.py b/tests/test_utils.py index 6ecd8dee..56d1dd05 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -6,7 +6,6 @@ class TestUtils(TestCase): def test_param_decl(self): - # From xml from: examples/specific/parameters.h xml = """ @@ -53,7 +52,7 @@ def test_param_decl(self): """ doc = parser.parse_str(xml) - assert isinstance(doc.value,parser.Node_DoxygenType) + assert isinstance(doc.value, parser.Node_DoxygenType) memberdef = doc.value.compounddef[0].sectiondef[0].memberdef[0] diff --git a/xml_parser_generator/make_parser.py 
b/xml_parser_generator/make_parser.py index ec4d6520..4a041c0d 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -8,15 +8,15 @@ import keyword import collections -from typing import Any,Callable,cast,Literal,NamedTuple,NoReturn,TYPE_CHECKING,TypeVar +from typing import Any, Callable, cast, Literal, NamedTuple, NoReturn, TYPE_CHECKING, TypeVar import jinja2 import perfect_hash if TYPE_CHECKING: - from collections.abc import Iterable,Sequence + from collections.abc import Iterable, Sequence -T = TypeVar('T') +T = TypeVar("T") # The minimum number of items a set should have before using a hash-based @@ -25,91 +25,106 @@ SPLIT_LINE_ITEM_THRESHOLD = 5 -BUILTIN_ATTR_SCHEMA_TYPES = [('string','str'),('DoxBool','bool'),('integer','int'),('empty','None')] +BUILTIN_ATTR_SCHEMA_TYPES = [ + ("string", "str"), + ("DoxBool", "bool"), + ("integer", "int"), + ("empty", "None"), +] -def comma_join(items: Sequence[str],indent:int=4): +def comma_join(items: Sequence[str], indent: int = 4): if len(items) < SPLIT_LINE_ITEM_THRESHOLD: - return ', '.join(items) + return ", ".join(items) + + return (",\n" + " " * indent).join(items) - return (',\n' + ' '*indent).join(items) class ContentType(enum.Enum): bare = enum.auto() tuple = enum.auto() union = enum.auto() + @dataclasses.dataclass(slots=True) class TypeRef: name: str py_name: str - type: str|SchemaType + type: str | SchemaType is_list: bool min_items: Literal[0] | Literal[1] - def py_type(self,as_param=False) -> str: - assert isinstance(self.type,SchemaType) + def py_type(self, as_param=False) -> str: + assert isinstance(self.type, SchemaType) if self.is_list: - container = 'Iterable' if as_param else 'FrozenList' - return f'{container}[{self.type.py_name}]' + container = "Iterable" if as_param else "FrozenList" + return f"{container}[{self.type.py_name}]" if self.min_items == 0: - return f'{self.type.py_name} | None' + return f"{self.type.py_name} | None" return self.type.py_name + @dataclasses.dataclass(slots=True) class Attribute: name: str py_name: str - type: str|AttributeType + type: str | AttributeType optional: bool - def py_type(self,as_param=False) -> str: - assert isinstance(self.type,SchemaType) + def py_type(self, as_param=False) -> str: + assert isinstance(self.type, SchemaType) if self.optional: - return f'{self.type.py_name} | None' + return f"{self.type.py_name} | None" return self.type.py_name + @dataclasses.dataclass(slots=True) class SchemaType: name: str def __str__(self): return self.name - + def content_names(self) -> Iterable[str]: return [] - + @property def py_name(self) -> str: raise NotImplementedError + @dataclasses.dataclass(slots=True) class AttributeType(SchemaType): pass + @dataclasses.dataclass(slots=True) class BuiltinType(SchemaType): py_name: str + class SpType(BuiltinType): __slots__ = () -class BuiltinAttributeType(BuiltinType,AttributeType): + +class BuiltinAttributeType(BuiltinType, AttributeType): __slots__ = () + class OtherAttrAction(enum.Enum): ignore = enum.auto() error = enum.auto() + @dataclasses.dataclass(slots=True) class ElementType(SchemaType): - bases: list[str|SchemaType] - attributes: dict[str,Attribute] + bases: list[str | SchemaType] + attributes: dict[str, Attribute] other_attr: OtherAttrAction - children: dict[str,TypeRef] + children: dict[str, TypeRef] used_directly: bool - def fields(self) -> Iterable[TypeRef|Attribute]: + def fields(self) -> Iterable[TypeRef | Attribute]: yield from self.attributes.values() yield from self.children.values() @@ -117,78 
+132,88 @@ def fields(self) -> Iterable[TypeRef|Attribute]: def direct_field_count(self): return len(self.attributes) + len(self.children) - def all_fields(self) -> Iterable[TypeRef|Attribute]: + def all_fields(self) -> Iterable[TypeRef | Attribute]: for b in self.bases: - if isinstance(b,ElementType): + if isinstance(b, ElementType): yield from b.all_fields() yield from self.fields() - + @property def py_name(self) -> str: - return f'Node_{self.name}' + return f"Node_{self.name}" + @dataclasses.dataclass(slots=True) class TagOnlyElement(ElementType): pass + @dataclasses.dataclass(slots=True) class ListElement(ElementType): min_items: int - content: dict[str,str|SchemaType] + content: dict[str, str | SchemaType] content_type: ContentType allow_text: bool - sp_tag: str|None = None + sp_tag: str | None = None def content_names(self) -> Iterable[str]: for b in self.bases: - assert isinstance(b,SchemaType) + assert isinstance(b, SchemaType) yield from b.content_names() yield from self.content def all_content(self): for b in self.bases: - if isinstance(b,ListElement): + if isinstance(b, ListElement): yield from b.content.values() yield from self.content.values() def py_item_type_union_size(self) -> int: size = len(self.content) if self.content_type == ContentType.union else 0 for b in self.bases: - if isinstance(b,ListElement): + if isinstance(b, ListElement): size += b.py_item_type_union_size() return size def py_union_ref(self) -> list[str]: types = self.py_union_list() - if len(types) <= 1: return types - return ['ListItem_'+self.name] + if len(types) <= 1: + return types + return ["ListItem_" + self.name] def py_union_list(self) -> list[str]: by_type = collections.defaultdict(list) - for name,t in self.content.items(): - assert isinstance(t,SchemaType) + for name, t in self.content.items(): + assert isinstance(t, SchemaType) by_type[t.py_name].append(name) - types = ['TaggedValue[Literal[{}], {}]'.format( - comma_join(sorted(f"'{n}'" for n in names),26), - t) for t,names in by_type.items()] + types = [ + "TaggedValue[Literal[{}], {}]".format( + comma_join(sorted(f"'{n}'" for n in names), 26), t + ) + for t, names in by_type.items() + ] str_included = False for b in self.bases: - if isinstance(b,ListElement): + if isinstance(b, ListElement): types.extend(b.py_union_ref()) - if b.allow_text: str_included = True + if b.allow_text: + str_included = True if self.allow_text and not str_included: - types.append('str') + types.append("str") return types + @dataclasses.dataclass(slots=True) class Schema: - roots: dict[str,str|SchemaType] - types: dict[str,SchemaType] + roots: dict[str, str | SchemaType] + types: dict[str, SchemaType] + class EnumEntry(NamedTuple): xml: str id: str + @dataclasses.dataclass(slots=True) class SchemaEnum(AttributeType): children: list[EnumEntry] @@ -196,11 +221,12 @@ class SchemaEnum(AttributeType): def any_renamed(self) -> bool: return any(c.xml != c.id for c in self.children) - + @property def py_name(self) -> str: return self.name + @dataclasses.dataclass(slots=True) class SchemaCharEnum(AttributeType): values: str @@ -209,106 +235,122 @@ class SchemaCharEnum(AttributeType): def py_name(self) -> str: return self.name -def unknown_type_error(ref: str,context: str,is_element: bool) -> NoReturn: - thing = 'element' if is_element else 'attribute' + +def unknown_type_error(ref: str, context: str, is_element: bool) -> NoReturn: + thing = "element" if is_element else "attribute" raise ValueError(f'{thing} "{context}" has undefined type "{ref}"') -def check_type_ref(schema: 
Schema,ref: str,context: str,is_element: bool=True) -> SchemaType: + +def check_type_ref(schema: Schema, ref: str, context: str, is_element: bool = True) -> SchemaType: t = schema.types.get(ref) if t is None: - unknown_type_error(ref,context,is_element) + unknown_type_error(ref, context, is_element) return t -def check_attr_type_ref(schema: Schema,ref: str,context: str) -> AttributeType: - r = check_type_ref(schema,ref,context,False) - if isinstance(r,AttributeType): + +def check_attr_type_ref(schema: Schema, ref: str, context: str) -> AttributeType: + r = check_type_ref(schema, ref, context, False) + if isinstance(r, AttributeType): return r - + raise ValueError(f'attribute "{context}" has incompatible type "{ref}"') + def check_py_name(name: str) -> None: if (not name.isidentifier()) or keyword.iskeyword(name): raise ValueError(f'"{name}" is not a valid Python identifier') - if name == '_children': + if name == "_children": raise ValueError('the name "_children" is reserved by the parser generator') -def resolve_refs(schema: Schema) -> tuple[list[str],list[str]]: + +def resolve_refs(schema: Schema) -> tuple[list[str], list[str]]: """Check that all referenced types exist and return the lists of all element names and attribute names""" elements: set[str] = set() attributes: set[str] = set() - def check_element_type_defined(name: str,ref: str) -> SchemaType: - t = check_type_ref(schema,ref,name) - if isinstance(t,ElementType): + def check_element_type_defined(name: str, ref: str) -> SchemaType: + t = check_type_ref(schema, ref, name) + if isinstance(t, ElementType): t.used_directly = True return t - for name,r in schema.roots.items(): + for name, r in schema.roots.items(): elements.add(name) - schema.roots[name] = check_element_type_defined(name,cast(str,r)) + schema.roots[name] = check_element_type_defined(name, cast(str, r)) - for typename,t in schema.types.items(): - if not t.name: t.name = typename + for typename, t in schema.types.items(): + if not t.name: + t.name = typename - if isinstance(t,ElementType): + if isinstance(t, ElementType): # TODO: check for recursive bases - for i,b in enumerate(t.bases): - b_type = schema.types.get(cast(str,b)) + for i, b in enumerate(t.bases): + b_type = schema.types.get(cast(str, b)) if b_type is None: raise ValueError(f'type "{typename}" has undefined base "{b}"') - if not isinstance(b_type,ElementType): + if not isinstance(b_type, ElementType): raise ValueError(f'"{b}" cannot be used as a base') - if isinstance(b_type,ListElement): - if not isinstance(t,ListElement): - raise ValueError(f'non-list elements cannot use list elements as bases') + if isinstance(b_type, ListElement): + if not isinstance(t, ListElement): + raise ValueError(f"non-list elements cannot use list elements as bases") if b_type.content_type != t.content_type: - raise ValueError(f'list elements of one type cannot use list elements of another type as bases') + raise ValueError( + f"list elements of one type cannot use list elements of another type as bases" + ) t.bases[i] = b_type - for name,child in t.children.items(): + for name, child in t.children.items(): child.name = name - if not child.py_name: child.py_name = name + if not child.py_name: + child.py_name = name check_py_name(child.py_name) elements.add(name) - child.type = check_element_type_defined(f'{typename}.{name}',cast(str,child.type)) - for name,attr in t.attributes.items(): + child.type = check_element_type_defined(f"{typename}.{name}", cast(str, child.type)) + for name, attr in t.attributes.items(): attr.name = 
name - if not attr.py_name: attr.py_name = name + if not attr.py_name: + attr.py_name = name check_py_name(attr.py_name) attributes.add(name) - t.attributes[name].type = check_attr_type_ref(schema,cast(str,attr.type),name) - if isinstance(t,ListElement): - for name,r in t.content.items(): + t.attributes[name].type = check_attr_type_ref(schema, cast(str, attr.type), name) + if isinstance(t, ListElement): + for name, r in t.content.items(): elements.add(name) - t.content[name] = check_element_type_defined(f'{typename}.{name}',cast(str,r)) - - elements.update(schema.roots); + t.content[name] = check_element_type_defined(f"{typename}.{name}", cast(str, r)) + + elements.update(schema.roots) + + return sorted(elements), sorted(attributes) - return sorted(elements),sorted(attributes) class HashData(NamedTuple): salt1: str salt2: str g: list[int] + def generate_hash(items: list[str]) -> HashData: try: - f1,f2,g = perfect_hash.generate_hash(items) - return HashData(f1.salt,f2.salt,g) + f1, f2, g = perfect_hash.generate_hash(items) + return HashData(f1.salt, f2.salt, g) except ValueError: - print(items,file=sys.stderr) + print(items, file=sys.stderr) raise -def collect_field_names(all_fields: set[str],cur_fields: set[str],refs: Iterable[Attribute|TypeRef],type_name: str) -> None: + +def collect_field_names( + all_fields: set[str], cur_fields: set[str], refs: Iterable[Attribute | TypeRef], type_name: str +) -> None: for ref in refs: all_fields.add(ref.py_name) if ref.py_name in cur_fields: raise ValueError(f'python name "{ref.py_name}" appears more than once in "{type_name}"') cur_fields.add(ref.py_name) + def make_env(schema: Schema) -> jinja2.Environment: - elements,attributes = resolve_refs(schema) + elements, attributes = resolve_refs(schema) tag_names: set[str] = set(schema.roots) py_field_name_set: set[str] = set() char_enum_chars: set[str] = set() @@ -317,24 +359,25 @@ def make_env(schema: Schema) -> jinja2.Environment: tuple_field_counts: set[int] = set() def field_count(t) -> int: - if not isinstance(t,ElementType): return 0 - return len(t.attributes) + len(t.children) + sum(cast(int,field_count(b)) for b in t.bases) + if not isinstance(t, ElementType): + return 0 + return len(t.attributes) + len(t.children) + sum(cast(int, field_count(b)) for b in t.bases) for t in schema.types.values(): - if isinstance(t,SchemaEnum): + if isinstance(t, SchemaEnum): if len(t.children) >= HASH_LOOKUP_THRESHOLD: t.hash = generate_hash([item.xml for item in t.children]) - elif isinstance(t,SchemaCharEnum): + elif isinstance(t, SchemaCharEnum): char_enum_chars.update(t.values) - elif isinstance(t,ElementType): + elif isinstance(t, ElementType): fields: set[str] = set() - collect_field_names(py_field_name_set,fields,t.attributes.values(),t.name) - collect_field_names(py_field_name_set,fields,t.children.values(),t.name) + collect_field_names(py_field_name_set, fields, t.attributes.values(), t.name) + collect_field_names(py_field_name_set, fields, t.children.values(), t.name) - if isinstance(t,TagOnlyElement): + if isinstance(t, TagOnlyElement): if t.used_directly: tagonly_and_tuple_field_counts.add(field_count(t)) - elif isinstance(t,ListElement): + elif isinstance(t, ListElement): if t.used_directly: list_element_field_counts.add(field_count(t)) if t.content_type == ContentType.union: @@ -342,86 +385,101 @@ def field_count(t) -> int: elif t.content_type == ContentType.tuple: tuple_field_counts.add(len(t.content)) tagonly_and_tuple_field_counts.add(len(t.content)) - + py_field_names = sorted(py_field_name_set) 
tmpl_env = jinja2.Environment( - block_start_string='{%', - block_end_string='%}', - variable_start_string='{$', - variable_end_string='$}', - comment_start_string='/*#', - comment_end_string='#*/', - line_statement_prefix='//%', - line_comment_prefix='//#', - autoescape=False) + block_start_string="{%", + block_end_string="%}", + variable_start_string="{$", + variable_end_string="$}", + comment_start_string="/*#", + comment_end_string="#*/", + line_statement_prefix="//%", + line_comment_prefix="//#", + autoescape=False, + ) def has_attributes(t): - if not isinstance(t,ElementType): + if not isinstance(t, ElementType): return False return t.attributes or any(has_attributes(b) for b in t.bases) - + def has_children(t): - if not isinstance(t,ElementType): + if not isinstance(t, ElementType): return False return t.children or any(has_children(b) for b in t.bases) def has_children_or_content(t): - if not isinstance(t,ElementType): + if not isinstance(t, ElementType): return False - return t.children or (isinstance(t,ListElement) and t.content) or any(has_children_or_content(b) for b in t.bases) - + return ( + t.children + or (isinstance(t, ListElement) and t.content) + or any(has_children_or_content(b) for b in t.bases) + ) + def has_children_or_tuple_content(t): - if not isinstance(t,ElementType): + if not isinstance(t, ElementType): return False - return (t.children - or (isinstance(t,ListElement) and t.content_type == ContentType.tuple and len(t.content) > 1) - or any(has_children_or_tuple_content(b) for b in t.bases)) + return ( + t.children + or ( + isinstance(t, ListElement) + and t.content_type == ContentType.tuple + and len(t.content) > 1 + ) + or any(has_children_or_tuple_content(b) for b in t.bases) + ) def base_offsets(t): - if not isinstance(t,ElementType): + if not isinstance(t, ElementType): return tmpl_env.undefined() total = 0 for b in t.bases: - assert isinstance(b,SchemaType) - yield b,total + assert isinstance(b, SchemaType) + yield b, total total += field_count(b) - yield None,total + yield None, total def list_type_or_base(t): - if not isinstance(t,ElementType): return False - return isinstance(t,ListElement) or any(list_type_or_base(b) for b in t.bases) + if not isinstance(t, ElementType): + return False + return isinstance(t, ListElement) or any(list_type_or_base(b) for b in t.bases) def allow_text(t): - if not isinstance(t,ListElement): return False + if not isinstance(t, ListElement): + return False return t.allow_text or any(allow_text(b) for b in t.bases) def content_type(ct): def inner(t): - if not isinstance(t,ListElement): return False + if not isinstance(t, ListElement): + return False return t.content_type == ct + return inner def children(t): - if not isinstance(t,ElementType): + if not isinstance(t, ElementType): return tmpl_env.undefined() return t.children.values() def get_attributes(t): - if not isinstance(t,ElementType): + if not isinstance(t, ElementType): return tmpl_env.undefined() return t.attributes.values() - + def content(t): - if not isinstance(t,ListElement): + if not isinstance(t, ListElement): return tmpl_env.undefined() return t.content.items() def used_directly(t): - return isinstance(t,ElementType) and t.used_directly - - def optional(ref: TypeRef|Attribute) -> bool: - if isinstance(ref,TypeRef): + return isinstance(t, ElementType) and t.used_directly + + def optional(ref: TypeRef | Attribute) -> bool: + if isinstance(ref, TypeRef): return ref.is_list or ref.min_items == 0 return ref.optional @@ -429,220 +487,298 @@ def error(msg): raise 
TypeError(msg) class Once: - def __init__(self,content): + def __init__(self, content): self.content = content self.used = False - + def __call__(self): - if self.used: return '' + if self.used: + return "" self.used = True return self.content for t in schema.types.values(): - if isinstance(t,ElementType) and any(field_count(cast(ElementType,b)) for b in t.bases): + if isinstance(t, ElementType) and any(field_count(cast(ElementType, b)) for b in t.bases): raise ValueError( - 'elements having bases that have "attributes" or "children" are not currently supported') - - tmpl_env.tests.update({ - 'element': (lambda x: isinstance(x,ElementType)), - 'tagonly_e': (lambda x: isinstance(x,TagOnlyElement)), - 'list_e': list_type_or_base, - 'builtin_t': (lambda x: isinstance(x,BuiltinType)), - 'enumeration_t': (lambda x: isinstance(x,SchemaEnum)), - 'char_enum_t': (lambda x: isinstance(x,SchemaCharEnum)), - 'appends_str': (lambda x: isinstance(x,SpType)), - 'used_directly': used_directly, - 'allow_text': allow_text, - 'has_attributes': has_attributes, - 'has_children': has_children, - 'has_children_or_content': has_children_or_content, - 'has_fields': lambda x: field_count(x) > 0, - 'has_children_or_tuple_content': has_children_or_tuple_content, - 'content_bare': content_type(ContentType.bare), - 'content_tuple': content_type(ContentType.tuple), - 'content_union': content_type(ContentType.union), - 'optional': optional}) - tmpl_env.filters.update({ - 'field_count': field_count, - 'base_offsets': base_offsets, - 'children': children, - 'attributes': get_attributes, - 'content': content, - 'error': error, - 'Once': Once}) - tmpl_env.globals.update({ - 'types': list(schema.types.values()), - 'root_elements': list(schema.roots.items()), - 'element_names': elements, - 'attribute_names': attributes, - 'py_field_names': py_field_names, - 'e_hash': generate_hash(elements), - 'a_hash': generate_hash(attributes), - 'py_f_hash': generate_hash(py_field_names), - 'union_tag_names': sorted(tag_names), - 'char_enum_chars': {c:i for i,c in enumerate(sorted(char_enum_chars))}, - 'list_element_field_counts': list(list_element_field_counts), - 'tagonly_and_tuple_field_counts': list(tagonly_and_tuple_field_counts), - 'tuple_field_counts': list(tuple_field_counts), - 'OtherAttrAction': OtherAttrAction}) + 'elements having bases that have "attributes" or "children" are not currently supported' + ) + + tmpl_env.tests.update( + { + "element": (lambda x: isinstance(x, ElementType)), + "tagonly_e": (lambda x: isinstance(x, TagOnlyElement)), + "list_e": list_type_or_base, + "builtin_t": (lambda x: isinstance(x, BuiltinType)), + "enumeration_t": (lambda x: isinstance(x, SchemaEnum)), + "char_enum_t": (lambda x: isinstance(x, SchemaCharEnum)), + "appends_str": (lambda x: isinstance(x, SpType)), + "used_directly": used_directly, + "allow_text": allow_text, + "has_attributes": has_attributes, + "has_children": has_children, + "has_children_or_content": has_children_or_content, + "has_fields": lambda x: field_count(x) > 0, + "has_children_or_tuple_content": has_children_or_tuple_content, + "content_bare": content_type(ContentType.bare), + "content_tuple": content_type(ContentType.tuple), + "content_union": content_type(ContentType.union), + "optional": optional, + } + ) + tmpl_env.filters.update( + { + "field_count": field_count, + "base_offsets": base_offsets, + "children": children, + "attributes": get_attributes, + "content": content, + "error": error, + "Once": Once, + } + ) + tmpl_env.globals.update( + { + "types": 
list(schema.types.values()), + "root_elements": list(schema.roots.items()), + "element_names": elements, + "attribute_names": attributes, + "py_field_names": py_field_names, + "e_hash": generate_hash(elements), + "a_hash": generate_hash(attributes), + "py_f_hash": generate_hash(py_field_names), + "union_tag_names": sorted(tag_names), + "char_enum_chars": {c: i for i, c in enumerate(sorted(char_enum_chars))}, + "list_element_field_counts": list(list_element_field_counts), + "tagonly_and_tuple_field_counts": list(tagonly_and_tuple_field_counts), + "tuple_field_counts": list(tuple_field_counts), + "OtherAttrAction": OtherAttrAction, + } + ) return tmpl_env -class _NoDefault: pass + +class _NoDefault: + pass + + _NO_DEFAULT = _NoDefault() -def get_json_value(conv: Callable[[Any,str],T],context: str,d: dict[str,Any],key:str,default: T|_NoDefault=_NO_DEFAULT) -> T: - r = d.get(key,_NO_DEFAULT) + + +def get_json_value( + conv: Callable[[Any, str], T], + context: str, + d: dict[str, Any], + key: str, + default: T | _NoDefault = _NO_DEFAULT, +) -> T: + r = d.get(key, _NO_DEFAULT) if r is _NO_DEFAULT: if default is _NO_DEFAULT: raise ValueError(f'missing value for "{context}.{key}"') - return cast(T,default) - return conv(r,context) + return cast(T, default) + return conv(r, context) -def check_simple(t: type[T],name: str) -> Callable[[Any,str],T]: - def inner(x,context:str) -> T: - if isinstance(x,t): + +def check_simple(t: type[T], name: str) -> Callable[[Any, str], T]: + def inner(x, context: str) -> T: + if isinstance(x, t): return x raise TypeError(f'value for "{context}" must be {name}') + return inner -get_json_bool = functools.partial(get_json_value,check_simple(bool,'a boolean')) -get_json_int = functools.partial(get_json_value,check_simple(int,'an integer')) -check_string = check_simple(str,'a string') -get_json_str = functools.partial(get_json_value,check_string) -check_obj = check_simple(cast(type[dict[str,Any]],dict),'an object') -get_json_obj = functools.partial(get_json_value,check_obj) +get_json_bool = functools.partial(get_json_value, check_simple(bool, "a boolean")) +get_json_int = functools.partial(get_json_value, check_simple(int, "an integer")) + +check_string = check_simple(str, "a string") +get_json_str = functools.partial(get_json_value, check_string) + +check_obj = check_simple(cast(type[dict[str, Any]], dict), "an object") +get_json_obj = functools.partial(get_json_value, check_obj) + +check_list = check_simple(list, "an array") + + +def get_json_mapping( + item_conv: Callable[[Any, str], T], + context: str, + d: dict, + key: str, + default: dict[str, T] | _NoDefault = _NO_DEFAULT, +) -> dict[str, T]: + def check(x, context): + x = check_obj(x, context) + return {key: item_conv(value, f"{context}.{key}") for key, value in x.items()} + + return get_json_value(check, context, d, key, default) -check_list = check_simple(list,'an array') -def get_json_mapping(item_conv: Callable[[Any,str],T],context: str,d: dict,key: str,default: dict[str,T]|_NoDefault=_NO_DEFAULT) -> dict[str,T]: - def check(x,context): - x = check_obj(x,context) - return {key:item_conv(value,f'{context}.{key}') for key,value in x.items()} - return get_json_value(check,context,d,key,default) +def get_json_list( + item_conv: Callable[[Any, str], T], + context: str, + d: dict, + key: str, + default: list[T] | _NoDefault = _NO_DEFAULT, +) -> list[T]: + def check(x, context) -> list[T]: + x = check_list(x, context) + return [item_conv(value, f"{context}[{i}]") for i, value in enumerate(x)] -def 
get_json_list(item_conv: Callable[[Any,str],T],context: str,d: dict,key: str,default: list[T]|_NoDefault=_NO_DEFAULT) -> list[T]: - def check(x,context) -> list[T]: - x = check_list(x,context) - return [item_conv(value,f'{context}[{i}]') for i,value in enumerate(x)] - return get_json_value(check,context,d,key,default) + return get_json_value(check, context, d, key, default) -def check_zero_or_one(x,context:str) -> Literal[0] | Literal[1]: - if x == 0: return 0 - if x == 1: return 1 + +def check_zero_or_one(x, context: str) -> Literal[0] | Literal[1]: + if x == 0: + return 0 + if x == 1: + return 1 raise TypeError(f'value for "{context}" must be 0 or 1') -get_json_zero_or_one = functools.partial(get_json_value,check_zero_or_one) -def check_other_attr_action(x,context:str) -> OtherAttrAction: - if x == "ignore": return OtherAttrAction.ignore - if x == "error": return OtherAttrAction.error + +get_json_zero_or_one = functools.partial(get_json_value, check_zero_or_one) + + +def check_other_attr_action(x, context: str) -> OtherAttrAction: + if x == "ignore": + return OtherAttrAction.ignore + if x == "error": + return OtherAttrAction.error raise TypeError(f'value for "{context}" must be "error" or "ignore"') -get_json_other_attr_action = functools.partial(get_json_value,check_other_attr_action) -def check_typeref(x,context:str) -> TypeRef: - x = check_obj(x,context) + +get_json_other_attr_action = functools.partial(get_json_value, check_other_attr_action) + + +def check_typeref(x, context: str) -> TypeRef: + x = check_obj(x, context) return TypeRef( - '', - get_json_str(context,x,'py_name',''), - get_json_str(context,x,'type'), - get_json_bool(context,x,'is_list',False), - get_json_zero_or_one(context,x,'min_items',1)) -get_json_typeref = functools.partial(get_json_value,check_typeref) - -def check_attribute(x,context:str) -> Attribute: - x = check_obj(x,context) + "", + get_json_str(context, x, "py_name", ""), + get_json_str(context, x, "type"), + get_json_bool(context, x, "is_list", False), + get_json_zero_or_one(context, x, "min_items", 1), + ) + + +get_json_typeref = functools.partial(get_json_value, check_typeref) + + +def check_attribute(x, context: str) -> Attribute: + x = check_obj(x, context) return Attribute( - '', - get_json_str(context,x,'py_name',''), - get_json_str(context,x,'type'), - get_json_bool(context,x,'optional',False)) -get_json_attribute = functools.partial(get_json_value,check_attribute) - -def check_enum_entry(x,context: str) -> EnumEntry: - if isinstance(x,str): - return EnumEntry(x,x) - if isinstance(x,dict): - xml = get_json_str(context,x,'xml') - id = get_json_str(context,x,'id',xml) + "", + get_json_str(context, x, "py_name", ""), + get_json_str(context, x, "type"), + get_json_bool(context, x, "optional", False), + ) + + +get_json_attribute = functools.partial(get_json_value, check_attribute) + + +def check_enum_entry(x, context: str) -> EnumEntry: + if isinstance(x, str): + return EnumEntry(x, x) + if isinstance(x, dict): + xml = get_json_str(context, x, "xml") + id = get_json_str(context, x, "id", xml) if not id.isidentifier(): raise ValueError(f'value of "{context}" is not a valid Python identifier') - return EnumEntry(xml,id) + return EnumEntry(xml, id) raise TypeError(f'"{context}" must be a string or object') -def make_tag_only_element(x: dict[str,Any],context: str) -> TagOnlyElement: + +def make_tag_only_element(x: dict[str, Any], context: str) -> TagOnlyElement: return TagOnlyElement( - '', - get_json_list(check_string,context,x,'bases',[]), - 
get_json_mapping(check_attribute,context,x,'attributes',{}), - get_json_other_attr_action(context,x,'other_attr',OtherAttrAction.error), - get_json_mapping(check_typeref,context,x,'children',{}), - False) - -def make_list_element(x: dict[str,Any],context: str,content_t: ContentType) -> ListElement: + "", + get_json_list(check_string, context, x, "bases", []), + get_json_mapping(check_attribute, context, x, "attributes", {}), + get_json_other_attr_action(context, x, "other_attr", OtherAttrAction.error), + get_json_mapping(check_typeref, context, x, "children", {}), + False, + ) + + +def make_list_element(x: dict[str, Any], context: str, content_t: ContentType) -> ListElement: return ListElement( - '', - get_json_list(check_string,context,x,'bases',[]), - get_json_mapping(check_attribute,context,x,'attributes',{}), - get_json_other_attr_action(context,x,'other_attr',OtherAttrAction.error), - get_json_mapping(check_typeref,context,x,'children',{}), + "", + get_json_list(check_string, context, x, "bases", []), + get_json_mapping(check_attribute, context, x, "attributes", {}), + get_json_other_attr_action(context, x, "other_attr", OtherAttrAction.error), + get_json_mapping(check_typeref, context, x, "children", {}), False, - get_json_int(context,x,'min_items',0), - get_json_mapping(check_string,context,x,'content',{}), + get_json_int(context, x, "min_items", 0), + get_json_mapping(check_string, context, x, "content", {}), content_t, - get_json_bool(context,x,'allow_text',False)) - -def make_enumeration(x: dict[str,Any],context: str) -> SchemaEnum: - return SchemaEnum('',get_json_list(check_enum_entry,context,x,'values')) - -def make_char_enumeration(x: dict[str,Any],context: str) -> SchemaCharEnum: - return SchemaCharEnum('',get_json_str(context,x,'values')) - -def check_type(x,context:str) -> SchemaType: - x = check_obj(x,context) - kind = get_json_str(context,x,'kind') - if kind == 'tag_only_element': - return make_tag_only_element(x,context) - if kind == 'list_element': - return make_list_element(x,context,ContentType.bare) - if kind == 'union_list_element': - return make_list_element(x,context,ContentType.union) - if kind == 'tuple_list_element': - return make_list_element(x,context,ContentType.tuple) - if kind == 'enumeration': - return make_enumeration(x,context) - if kind == 'char_enumeration': - return make_char_enumeration(x,context) - - raise ValueError(f'"{context}.kind" must be "tag_only_element", "list_element", "mixed_element" or "enumeration"') -get_json_type = functools.partial(get_json_value,check_type) + get_json_bool(context, x, "allow_text", False), + ) + + +def make_enumeration(x: dict[str, Any], context: str) -> SchemaEnum: + return SchemaEnum("", get_json_list(check_enum_entry, context, x, "values")) + + +def make_char_enumeration(x: dict[str, Any], context: str) -> SchemaCharEnum: + return SchemaCharEnum("", get_json_str(context, x, "values")) + + +def check_type(x, context: str) -> SchemaType: + x = check_obj(x, context) + kind = get_json_str(context, x, "kind") + if kind == "tag_only_element": + return make_tag_only_element(x, context) + if kind == "list_element": + return make_list_element(x, context, ContentType.bare) + if kind == "union_list_element": + return make_list_element(x, context, ContentType.union) + if kind == "tuple_list_element": + return make_list_element(x, context, ContentType.tuple) + if kind == "enumeration": + return make_enumeration(x, context) + if kind == "char_enumeration": + return make_char_enumeration(x, context) + + raise ValueError( + 
f'"{context}.kind" must be "tag_only_element", "list_element", "mixed_element" or "enumeration"' + ) + + +get_json_type = functools.partial(get_json_value, check_type) + def check_schema(x) -> Schema: - if not isinstance(x,dict): - raise TypeError('json value must be an object') + if not isinstance(x, dict): + raise TypeError("json value must be an object") r = Schema( - get_json_mapping(check_string,'',x,'roots'), - get_json_mapping(check_type,'',x,'types',{})) - r.types['#spType'] = SpType('spType','str') - for t,py in BUILTIN_ATTR_SCHEMA_TYPES: - r.types['#'+t] = BuiltinAttributeType(t,py) + get_json_mapping(check_string, "", x, "roots"), + get_json_mapping(check_type, "", x, "types", {}), + ) + r.types["#spType"] = SpType("spType", "str") + for t, py in BUILTIN_ATTR_SCHEMA_TYPES: + r.types["#" + t] = BuiltinAttributeType(t, py) return r -def generate_from_json(json_path,c_template_file,pyi_template_file,c_output_file,pyi_output_file) -> None: - with open(json_path,'rb') as ifile: + +def generate_from_json( + json_path, c_template_file, pyi_template_file, c_output_file, pyi_output_file +) -> None: + with open(json_path, "rb") as ifile: schema = check_schema(json.load(ifile)) env = make_env(schema) with open(c_template_file) as tfile: template_str = tfile.read() - with open(c_output_file,'w') as ofile: + with open(c_output_file, "w") as ofile: env.from_string(template_str).stream().dump(ofile) - + with open(pyi_template_file) as tfile: template_str = tfile.read() - with open(pyi_output_file,'w') as ofile: + with open(pyi_output_file, "w") as ofile: env.from_string(template_str).stream().dump(ofile) -if __name__ == '__main__': - generate_from_json(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],sys.argv[5]) +if __name__ == "__main__": + generate_from_json(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]) diff --git a/xml_parser_generator/module_template.c b/xml_parser_generator/module_template.c.in similarity index 100% rename from xml_parser_generator/module_template.c rename to xml_parser_generator/module_template.c.in diff --git a/xml_parser_generator/stubs_template.pyi b/xml_parser_generator/stubs_template.pyi.in similarity index 100% rename from xml_parser_generator/stubs_template.pyi rename to xml_parser_generator/stubs_template.pyi.in From 5f55b6486e01cb9d36d236c3d6ba1b720cda9380 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 13 Dec 2023 00:49:51 -0500 Subject: [PATCH 31/65] Update unit test action --- .github/workflows/unit_tests.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 7a797984..9bd19b63 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -7,10 +7,8 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.7', '3.8', '3.9', '3.10'] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] sphinx-version: - - '4.0.3' - - '4.1.2' - '4.2.0' - '4.3.2' - '4.5.0' @@ -28,12 +26,14 @@ jobs: sphinx-version: '4.0.3' - python-version: '3.10' sphinx-version: '4.1.2' - - # Sphinx has removed support for Python 3.7, Breathe will follow. 
- - python-version: '3.7' - sphinx-version: git+https://github.com/sphinx-doc/sphinx.git@master - - python-version: '3.7' - sphinx-version: '6.1.3' + - python-version: '3.11' + sphinx-version: '4.0.3' + - python-version: '3.11' + sphinx-version: '4.1.2' + - python-version: '3.12' + sphinx-version: '4.0.3' + - python-version: '3.12' + sphinx-version: '4.1.2' steps: - uses: actions/checkout@v2 From 5626a08df14e14d6cba2ad2f0044edfb924dfbba Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 13 Dec 2023 01:07:57 -0500 Subject: [PATCH 32/65] Update git actions - attempt 2 --- .github/workflows/documentation.yml | 4 ++-- .github/workflows/unit_tests.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index e4ede026..c9e4246a 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -17,11 +17,11 @@ jobs: - name: set up python uses: actions/setup-python@v1 - - name: install dependencies + - name: install dependencies and build extension module run: | - pip install -r requirements/development.txt sudo apt-get -y update sudo apt-get -y install graphviz libclang1-11 libclang-cpp11 + pip install --editable . - name: install doxygen from SF binary archives env: diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 9bd19b63..f85209c6 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -56,9 +56,9 @@ jobs: pip install -Iv Sphinx==${{ matrix.sphinx-version }} fi - - name: install dependencies + - name: install dependencies and build extension module run: | - pip install -r requirements/development.txt + pip install --editable . - name: run the unit tests run: make dev-test From cec80a68665158008805fdec22ff45d70db7f1a9 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 13 Dec 2023 01:20:17 -0500 Subject: [PATCH 33/65] Attempted fix for code that works locally but not on test runner --- xml_parser_generator/make_parser.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index 4a041c0d..35ee9e3a 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -103,12 +103,14 @@ class BuiltinType(SchemaType): py_name: str +@dataclasses.dataclass(slots=True) class SpType(BuiltinType): - __slots__ = () + pass +@dataclasses.dataclass(slots=True) class BuiltinAttributeType(BuiltinType, AttributeType): - __slots__ = () + pass class OtherAttrAction(enum.Enum): From 6583436e5e1bf9308157e1e56187c6cfe8a82219 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 13 Dec 2023 01:30:27 -0500 Subject: [PATCH 34/65] Fix in Github actions and Python compatibility --- .github/workflows/documentation.yml | 1 + .github/workflows/unit_tests.yml | 1 + xml_parser_generator/make_parser.py | 26 +++++++++++++------------- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index c9e4246a..7452993f 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -21,6 +21,7 @@ jobs: run: | sudo apt-get -y update sudo apt-get -y install graphviz libclang1-11 libclang-cpp11 + pip install -r requirements/development.txt pip install --editable . 
- name: install doxygen from SF binary archives diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index f85209c6..7fbe8cb8 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -58,6 +58,7 @@ jobs: - name: install dependencies and build extension module run: | + pip install -r requirements/development.txt pip install --editable . - name: run the unit tests diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index 35ee9e3a..06dd9fde 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -46,7 +46,7 @@ class ContentType(enum.Enum): union = enum.auto() -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class TypeRef: name: str py_name: str @@ -64,7 +64,7 @@ def py_type(self, as_param=False) -> str: return self.type.py_name -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class Attribute: name: str py_name: str @@ -78,7 +78,7 @@ def py_type(self, as_param=False) -> str: return self.type.py_name -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class SchemaType: name: str @@ -93,22 +93,22 @@ def py_name(self) -> str: raise NotImplementedError -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class AttributeType(SchemaType): pass -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class BuiltinType(SchemaType): py_name: str -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class SpType(BuiltinType): pass -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class BuiltinAttributeType(BuiltinType, AttributeType): pass @@ -118,7 +118,7 @@ class OtherAttrAction(enum.Enum): error = enum.auto() -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class ElementType(SchemaType): bases: list[str | SchemaType] attributes: dict[str, Attribute] @@ -145,12 +145,12 @@ def py_name(self) -> str: return f"Node_{self.name}" -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class TagOnlyElement(ElementType): pass -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class ListElement(ElementType): min_items: int content: dict[str, str | SchemaType] @@ -205,7 +205,7 @@ def py_union_list(self) -> list[str]: return types -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class Schema: roots: dict[str, str | SchemaType] types: dict[str, SchemaType] @@ -216,7 +216,7 @@ class EnumEntry(NamedTuple): id: str -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class SchemaEnum(AttributeType): children: list[EnumEntry] hash: HashData | None = None @@ -229,7 +229,7 @@ def py_name(self) -> str: return self.name -@dataclasses.dataclass(slots=True) +@dataclasses.dataclass() class SchemaCharEnum(AttributeType): values: str From d64b641dcc047463c1d223efbbf29849a8c84417 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 13 Dec 2023 01:46:34 -0500 Subject: [PATCH 35/65] Fixed problem introduced by previous fix --- xml_parser_generator/make_parser.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index 06dd9fde..616f52a1 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -88,9 +88,10 @@ def __str__(self): def content_names(self) -> Iterable[str]: return [] - @property - def py_name(self) -> str: - raise NotImplementedError + if TYPE_CHECKING: + @property + def py_name(self) -> str: + raise NotImplementedError 
@dataclasses.dataclass() @@ -597,7 +598,7 @@ def inner(x, context: str) -> T: check_string = check_simple(str, "a string") get_json_str = functools.partial(get_json_value, check_string) -check_obj = check_simple(cast(type[dict[str, Any]], dict), "an object") +check_obj = check_simple(cast("type[dict[str, Any]]", dict), "an object") get_json_obj = functools.partial(get_json_value, check_obj) check_list = check_simple(list, "an array") From 42f9e112de135520ceb303bf757cd23b3290e42e Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 13 Dec 2023 01:54:58 -0500 Subject: [PATCH 36/65] Neglected to install Doxygen in unit test action --- .github/workflows/unit_tests.yml | 11 +++++++++++ tests/test_renderer.py | 2 ++ 2 files changed, 13 insertions(+) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 7fbe8cb8..a073091f 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -56,6 +56,17 @@ jobs: pip install -Iv Sphinx==${{ matrix.sphinx-version }} fi + - name: install doxygen from SF binary archives + env: + DOXYGEN_VERSION: 1.9.4 + run: | + mkdir doxygen-bin-arc && cd doxygen-bin-arc + curl -L https://sourceforge.net/projects/doxygen/files/rel-$DOXYGEN_VERSION/doxygen-$DOXYGEN_VERSION.linux.bin.tar.gz > doxygen.tar.gz + gunzip doxygen.tar.gz + tar xf doxygen.tar + cd doxygen-$DOXYGEN_VERSION + sudo make install + - name: install dependencies and build extension module run: | pip install -r requirements/development.txt diff --git a/tests/test_renderer.py b/tests/test_renderer.py index 5eb9abe1..b91ad5c1 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import sphinx.locale From 9c20dc737114977063c6c5b40a41626298a4573f Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 13 Dec 2023 04:00:05 -0500 Subject: [PATCH 37/65] Updated Github actions and added much needed cache --- .github/workflows/documentation.yml | 34 +++++--- .github/workflows/lint.yml | 13 ++- .github/workflows/unit_tests.yml | 34 +++++--- tests/data/auto/compare.xml | 10 +-- tests/data/examples/test_alias/compare.xml | 2 +- tests/data/examples/test_array/compare.xml | 4 +- tests/data/examples/test_c_enum/compare.xml | 2 +- tests/data/examples/test_c_file/compare.xml | 38 ++++----- tests/data/examples/test_class/compare.xml | 80 +++++++++---------- .../examples/test_code_blocks/compare.xml | 6 +- .../examples/test_cpp_concept/compare.xml | 2 +- tests/data/examples/test_cpp_enum/compare.xml | 16 ++-- .../examples/test_cpp_friendclass/compare.xml | 6 +- .../examples/test_cpp_function/compare.xml | 20 ++--- .../test_cpp_inherited_members/compare.xml | 6 +- .../test_cpp_trailing_return_type/compare.xml | 4 +- tests/data/examples/test_define/compare.xml | 8 +- .../data/examples/test_dot_graphs/compare.xml | 6 +- tests/data/examples/test_group/compare.xml | 18 ++--- .../test_group_content_only/compare.xml | 2 +- tests/data/examples/test_headings/compare.xml | 2 +- tests/data/examples/test_image/compare.xml | 2 +- tests/data/examples/test_index/compare.xml | 4 +- .../examples/test_inheritance/compare.xml | 16 ++-- tests/data/examples/test_inline/compare.xml | 4 +- .../data/examples/test_latexmath/compare.xml | 2 +- tests/data/examples/test_links/compare.xml | 2 +- tests/data/examples/test_lists/compare.xml | 22 ++--- .../examples/test_membergroups/compare.xml | 8 +- .../data/examples/test_param_dirs/compare.xml | 2 +- tests/data/examples/test_python/compare.xml | 14 ++-- 
.../test_qtsignalsandslots/compare.xml | 8 +- tests/data/examples/test_rst/compare.xml | 14 ++-- .../data/examples/test_simplesect/compare.xml | 2 +- tests/data/examples/test_tables/compare.xml | 10 +-- .../test_template_class_non_type/compare.xml | 8 +- .../test_template_function/compare.xml | 8 +- .../test_template_type_alias/compare.xml | 4 +- tests/data/examples/test_union/compare.xml | 14 ++-- .../examples/test_userdefined/compare.xml | 10 +-- tests/data/examples/test_xrefsect/compare.xml | 8 +- 41 files changed, 246 insertions(+), 229 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 7452993f..81ff9576 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -7,15 +7,13 @@ jobs: steps: - uses: actions/checkout@v2 - - uses: actions/cache@v1 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('requirements/*.txt') }} - restore-keys: | - ${{ runner.os }}-pip- - - name: set up python - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 + with: + cache: 'pip' + cache-dependency-path: | + requirements/development.txt + requirements/production.txt - name: install dependencies and build extension module run: | @@ -24,12 +22,22 @@ jobs: pip install -r requirements/development.txt pip install --editable . - - name: install doxygen from SF binary archives - env: - DOXYGEN_VERSION: 1.9.4 + - name: cache Doxygen + id: cache-doxygen + uses: actions/cache@v3 + with: + path: doxygen-bin-arc/doxygen.tar.gz + key: ${{ runner.os }}-doxygen + + - name: download Doxygen from SF binary archives + if: steps.cache-doxygen.outputs.cache-hit != 'true' + run: | + mkdir doxygen-bin-arc + curl -L https://sourceforge.net/projects/doxygen/files/rel-$DOXYGEN_VERSION/doxygen-$DOXYGEN_VERSION.linux.bin.tar.gz > doxygen-bin-arc/doxygen.tar.gz + + - name: install Doxygen run: | - mkdir doxygen-bin-arc && cd doxygen-bin-arc - curl -L https://sourceforge.net/projects/doxygen/files/rel-$DOXYGEN_VERSION/doxygen-$DOXYGEN_VERSION.linux.bin.tar.gz > doxygen.tar.gz + cd doxygen-bin-arc gunzip doxygen.tar.gz tar xf doxygen.tar cd doxygen-$DOXYGEN_VERSION diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 5134e533..91355d47 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -7,14 +7,13 @@ jobs: steps: - uses: actions/checkout@v2 - - uses: actions/cache@v1 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('requirements/*.txt') }} - restore-keys: | - ${{ runner.os }}-pip- - name: set up python - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 + with: + cache: 'pip' + cache-dependency-path: | + requirements/development.txt + requirements/production.txt - name: install dependencies run: | diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index a073091f..223ff0f9 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -1,5 +1,7 @@ name: unit tests on: [push, pull_request] +env: + DOXYGEN_VERSION: 1.9.4 jobs: build: @@ -37,16 +39,14 @@ jobs: steps: - uses: actions/checkout@v2 - - uses: actions/cache@v1 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('requirements/*.txt') }} - restore-keys: | - ${{ runner.os }}-pip- - name: set up python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: | + 
requirements/development.txt + requirements/production.txt - name: install sphinx from PyPI or from git run: | @@ -56,12 +56,22 @@ jobs: pip install -Iv Sphinx==${{ matrix.sphinx-version }} fi - - name: install doxygen from SF binary archives - env: - DOXYGEN_VERSION: 1.9.4 + - name: cache Doxygen + id: cache-doxygen + uses: actions/cache@v3 + with: + path: doxygen-bin-arc/doxygen.tar.gz + key: ${{ runner.os }}-doxygen + + - name: download Doxygen from SF binary archives + if: steps.cache-doxygen.outputs.cache-hit != 'true' + run: | + mkdir doxygen-bin-arc + curl -L https://sourceforge.net/projects/doxygen/files/rel-$DOXYGEN_VERSION/doxygen-$DOXYGEN_VERSION.linux.bin.tar.gz > doxygen-bin-arc/doxygen.tar.gz + + - name: install Doxygen run: | - mkdir doxygen-bin-arc && cd doxygen-bin-arc - curl -L https://sourceforge.net/projects/doxygen/files/rel-$DOXYGEN_VERSION/doxygen-$DOXYGEN_VERSION.linux.bin.tar.gz > doxygen.tar.gz + cd doxygen-bin-arc gunzip doxygen.tar.gz tar xf doxygen.tar cd doxygen-$DOXYGEN_VERSION diff --git a/tests/data/auto/compare.xml b/tests/data/auto/compare.xml index 5524c8b8..ae522088 100644 --- a/tests/data/auto/compare.xml +++ b/tests/data/auto/compare.xml @@ -3,21 +3,21 @@ - class AutoClassTest + class AutoClassTest class outside of namespace Private Functions - inline void member + inline void member non-namespaced class function - inline void anotherMember + inline void anotherMember non-namespaced class other function @@ -29,14 +29,14 @@ Functions - void autoFunction + void autoFunction non-namespaced class function - void anotherAutoFunction + void anotherAutoFunction non-namespaced class other function diff --git a/tests/data/examples/test_alias/compare.xml b/tests/data/examples/test_alias/compare.xml index a65a1303..a24fbd7e 100644 --- a/tests/data/examples/test_alias/compare.xml +++ b/tests/data/examples/test_alias/compare.xml @@ -5,7 +5,7 @@ Functions - void frob_foosvoid *Frobs + void frob_foosvoid *Frobs Foo frob routine. bob this something elseSide EffectsFrobs any foos.bob this something elseSide EffectsFrobs any foos. diff --git a/tests/data/examples/test_array/compare.xml b/tests/data/examples/test_array/compare.xml index a5775c5d..c978b27e 100644 --- a/tests/data/examples/test_array/compare.xml +++ b/tests/data/examples/test_array/compare.xml @@ -3,14 +3,14 @@ - int fooint a[5] + int fooint a[5] My function. - int barint nint a[] + int barint nint a[] My other function. Test:This declaration is supposed to be int bar(int n, int a[static n]); But, Sphinx fails to recognize int a[static n]) as a C specific array syntax diff --git a/tests/data/examples/test_c_enum/compare.xml b/tests/data/examples/test_c_enum/compare.xml index 2a338d57..4b90ff00 100644 --- a/tests/data/examples/test_c_enum/compare.xml +++ b/tests/data/examples/test_c_enum/compare.xml @@ -3,7 +3,7 @@ - enum GSM_BackupFormat + enum GSM_BackupFormat Backup data. 
Values: diff --git a/tests/data/examples/test_c_file/compare.xml b/tests/data/examples/test_c_file/compare.xml index ac1925ac..d379fa38 100644 --- a/tests/data/examples/test_c_file/compare.xml +++ b/tests/data/examples/test_c_file/compare.xml @@ -5,33 +5,33 @@ Defines - WRITE_TREE_MISSING_OK + WRITE_TREE_MISSING_OK bitmasks to write_cache_as_tree flags - WRITE_TREE_IGNORE_CACHE_TREE + WRITE_TREE_IGNORE_CACHE_TREE - WRITE_TREE_UNREADABLE_INDEX + WRITE_TREE_UNREADABLE_INDEX error return codes - WRITE_TREE_UNMERGED_INDEX + WRITE_TREE_UNMERGED_INDEX - WRITE_TREE_PREFIX_ERROR + WRITE_TREE_PREFIX_ERROR @@ -40,67 +40,67 @@ Functions - struct cache_tree *cache_treevoid + struct cache_tree *cache_treevoid - void cache_tree_freestruct cache_tree** + void cache_tree_freestruct cache_tree** - void cache_tree_invalidate_pathstruct cache_tree*const char* + void cache_tree_invalidate_pathstruct cache_tree*const char* - struct cache_tree_sub *cache_tree_substruct cache_tree*const char* + struct cache_tree_sub *cache_tree_substruct cache_tree*const char* - void cache_tree_writestruct strbuf*struct cache_tree *root + void cache_tree_writestruct strbuf*struct cache_tree *root - struct cache_tree *cache_tree_readconst char *bufferunsigned long size + struct cache_tree *cache_tree_readconst char *bufferunsigned long size - int cache_tree_fully_validstruct cache_tree* + int cache_tree_fully_validstruct cache_tree* - int cache_tree_updatestruct cache_tree*struct cache_entry**intintint + int cache_tree_updatestruct cache_tree*struct cache_entry**intintint - int write_cache_as_treeunsigned char *sha1int flagsconst char *prefix + int write_cache_as_treeunsigned char *sha1int flagsconst char *prefix - void prime_cache_treestruct cache_tree**struct tree* + void prime_cache_treestruct cache_tree**struct tree* - int cache_tree_matches_traversalstruct cache_tree*struct name_entry *entstruct traverse_info *info + int cache_tree_matches_traversalstruct cache_tree*struct name_entry *entstruct traverse_info *info @@ -109,7 +109,7 @@ Variables - struct cache_tree global_cache_tree + struct cache_tree global_cache_tree Shared cache tree instance. @@ -117,13 +117,13 @@ - struct cache_tree_sub + struct cache_tree_sub - struct cache_tree + struct cache_tree diff --git a/tests/data/examples/test_class/compare.xml b/tests/data/examples/test_class/compare.xml index ed7f920d..840a0ba4 100644 --- a/tests/data/examples/test_class/compare.xml +++ b/tests/data/examples/test_class/compare.xml @@ -5,25 +5,25 @@ Functions - template<typename T>void f0 + template<typename T>void f0 - template<>void f0<std::string> + template<>void f0<std::string>
- class OuterClass + class OuterClass class outside of namespace - class InnerClass + class InnerClass inner class @@ -32,14 +32,14 @@ - class ClassTest + class ClassTest class outside of namespace Public Functions - void functionint myParameter + void functionint myParameter non-namespaced class function More details in the header file. @@ -48,7 +48,7 @@ - void anotherFunction + void anotherFunction non-namespaced class other function More documentation in the impl file @@ -56,14 +56,14 @@ - virtual void publicFunction const = 0 + virtual void publicFunction const = 0 namespaced class function - virtual void undocumentedPublicFunction const = 0 + virtual void undocumentedPublicFunction const = 0 @@ -72,14 +72,14 @@ Protected Functions - inline void protectedFunction + inline void protectedFunction A protected function. - inline void undocumentedProtectedFunction + inline void undocumentedProtectedFunction @@ -88,93 +88,93 @@ Private Functions - virtual void privateFunction const = 0 + virtual void privateFunction const = 0 This is a private function. - virtual void undocumentedPrivateFunction const = 0 + virtual void undocumentedPrivateFunction const = 0 - class PrivateClass + class PrivateClass A private class. - struct PrivateStruct + struct PrivateStruct A private struct. - class ProtectedClass + class ProtectedClass A protected class. - struct ProtectedStruct + struct ProtectedStruct A protected struct. - class PublicClass + class PublicClass A public class. - struct PublicStruct + struct PublicStruct A public struct. - class UndocumentedPrivateClass + class UndocumentedPrivateClass - struct UndocumentedPrivateStruct + struct UndocumentedPrivateStruct - class UndocumentedProtectedClass + class UndocumentedProtectedClass - struct UndocumentedProtectedStruct + struct UndocumentedProtectedStruct - class UndocumentedPublicClass + class UndocumentedPublicClass - struct UndocumentedPublicStruct + struct UndocumentedPublicStruct @@ -182,25 +182,25 @@ - namespace TestNamespaceClasses + namespace TestNamespaceClasses - class ClassTest + class ClassTest second class inside of namespace Public Functions - inline void function + inline void function second namespaced class function - inline void anotherFunction + inline void anotherFunction second namespaced class other function @@ -210,27 +210,27 @@ - class NamespacedClassTest + class NamespacedClassTest first class inside of namespace Public Functions - virtual void function const = 0 + virtual void function const = 0 namespaced class function - inline explicit NamespacedClassTest + inline explicit NamespacedClassTest - inline void anotherFunction + inline void anotherFunction namespaced class other function @@ -240,7 +240,7 @@ Public Static Functions - static void functionS + static void functionS @@ -251,38 +251,38 @@ - namespace NS1 + namespace NS1 Functions - template<typename T>void f1 + template<typename T>void f1 - template<>void f1<std::string> + template<>void f1<std::string> - namespace NS2 + namespace NS2 Functions - template<typename T>void f2 + template<typename T>void f2 - template<>void f2<std::string> + template<>void f2<std::string> diff --git a/tests/data/examples/test_code_blocks/compare.xml b/tests/data/examples/test_code_blocks/compare.xml index 4a7eac79..ded91eba 100644 --- a/tests/data/examples/test_code_blocks/compare.xml +++ b/tests/data/examples/test_code_blocks/compare.xml @@ -5,7 +5,7 @@ Functions - void with_standard_code_block + void with_standard_code_block A function with an unannotated code block with C/C++ 
code. char* buffer = new char[42]; @@ -14,7 +14,7 @@ int charsAdded = sprintf(buffer, "Tabs are normally %d spaces\n", 8); - void with_unannotated_cmake_code_block + void with_unannotated_cmake_code_block A function with an unannotated code block with non-C/C++ code. set(user_list A B C) @@ -26,7 +26,7 @@ endforeach() - void with_annotated_cmake_code_block + void with_annotated_cmake_code_block A function with an annotated cmake code block. set(user_list A B C) diff --git a/tests/data/examples/test_cpp_concept/compare.xml b/tests/data/examples/test_cpp_concept/compare.xml index 74fb257e..4402ebcc 100644 --- a/tests/data/examples/test_cpp_concept/compare.xml +++ b/tests/data/examples/test_cpp_concept/compare.xml @@ -3,7 +3,7 @@ - template<typename T>concept Hashable + template<typename T>concept Hashable diff --git a/tests/data/examples/test_cpp_enum/compare.xml b/tests/data/examples/test_cpp_enum/compare.xml index 5b81c11a..13fe8b02 100644 --- a/tests/data/examples/test_cpp_enum/compare.xml +++ b/tests/data/examples/test_cpp_enum/compare.xml @@ -3,12 +3,12 @@ - enum Unscoped + enum Unscoped Values: - enumerator UnscopedEnumerator + enumerator UnscopedEnumerator @@ -16,12 +16,12 @@ - enum class ScopedStruct : int + enum class ScopedStruct : int Values: - enumerator Enumerator + enumerator Enumerator @@ -29,12 +29,12 @@ - enum class ScopedClass : int + enum class ScopedClass : int Values: - enumerator Enumerator + enumerator Enumerator @@ -42,12 +42,12 @@ - enum class ScopedClassNoUnderlying + enum class ScopedClassNoUnderlying Values: - enumerator Enumerator + enumerator Enumerator diff --git a/tests/data/examples/test_cpp_friendclass/compare.xml b/tests/data/examples/test_cpp_friendclass/compare.xml index eabbdf48..2878d599 100644 --- a/tests/data/examples/test_cpp_friendclass/compare.xml +++ b/tests/data/examples/test_cpp_friendclass/compare.xml @@ -3,19 +3,19 @@ - struct A + struct A - struct B + struct B - struct C + struct C Friends diff --git a/tests/data/examples/test_cpp_function/compare.xml b/tests/data/examples/test_cpp_function/compare.xml index 38cccec1..c5e7814e 100644 --- a/tests/data/examples/test_cpp_function/compare.xml +++ b/tests/data/examples/test_cpp_function/compare.xml @@ -3,37 +3,37 @@ - struct Foo + struct Foo - struct Class + struct Class Public Functions - virtual void f1 volatile const & = 0 + virtual void f1 volatile const & = 0 - virtual void f2 volatile const && = 0 + virtual void f2 volatile const && = 0 - int f_issue_338 noexcept + int f_issue_338 noexcept - int anon_paramsintintint xchar* + int anon_paramsintintint xchar* @@ -42,7 +42,7 @@ Public Members - void (*f_issue_489)(struct Foo *foo, int value) + void (*f_issue_489)(struct Foo *foo, int value) @@ -51,7 +51,7 @@ Public Static Functions - static void f3 + static void f3 @@ -60,14 +60,14 @@ - namespace TestNamespaceFunction + namespace TestNamespaceFunction A namespace to demonstrate a namespaced function. Functions - void namespaceFunc + void namespaceFunc A function within a namspace. diff --git a/tests/data/examples/test_cpp_inherited_members/compare.xml b/tests/data/examples/test_cpp_inherited_members/compare.xml index 86ade12e..2f288029 100644 --- a/tests/data/examples/test_cpp_inherited_members/compare.xml +++ b/tests/data/examples/test_cpp_inherited_members/compare.xml @@ -3,7 +3,7 @@ - class Base + class Base Base class. Subclassed by A, B @@ -21,7 +21,7 @@ - class A : public Base + class A : public Base Class A. @@ -38,7 +38,7 @@ - class B : public Base + class B : public Base Class B. 
diff --git a/tests/data/examples/test_cpp_trailing_return_type/compare.xml b/tests/data/examples/test_cpp_trailing_return_type/compare.xml index df22a0ca..fcadc152 100644 --- a/tests/data/examples/test_cpp_trailing_return_type/compare.xml +++ b/tests/data/examples/test_cpp_trailing_return_type/compare.xml @@ -5,7 +5,7 @@ Functions - auto f_issue_441 -> Thingy* + auto f_issue_441 -> Thingy* Function that creates a thingy. @@ -13,7 +13,7 @@ - class Thingy + class Thingy needed for references in global function return type diff --git a/tests/data/examples/test_define/compare.xml b/tests/data/examples/test_define/compare.xml index 5b4283eb..c6e75e9c 100644 --- a/tests/data/examples/test_define/compare.xml +++ b/tests/data/examples/test_define/compare.xml @@ -3,21 +3,21 @@ - USE_STUFF + USE_STUFF A simple define without a value. - MAX_LENGTH + MAX_LENGTH A define with a simple value. - MAXIMUMAB + MAXIMUMAB A define with some parameters. @@ -45,7 +45,7 @@ - SWAPAB + SWAPAB A define which spans multiple lines. diff --git a/tests/data/examples/test_dot_graphs/compare.xml b/tests/data/examples/test_dot_graphs/compare.xml index ff9de2f2..c5c93871 100644 --- a/tests/data/examples/test_dot_graphs/compare.xml +++ b/tests/data/examples/test_dot_graphs/compare.xml @@ -2,16 +2,16 @@ - page dotgraphs + page dotgraphs
Using @dot command -
basic graph elements
+
basic graph elements
Using @dotfile command - +
diff --git a/tests/data/examples/test_group/compare.xml b/tests/data/examples/test_group/compare.xml index 9f98429a..08456c26 100644 --- a/tests/data/examples/test_group/compare.xml +++ b/tests/data/examples/test_group/compare.xml @@ -2,14 +2,14 @@ - group mygroup + group mygroup This is the first group. Functions - void groupedFunction + void groupedFunction This function is in MyGroup. @@ -17,14 +17,14 @@ - class GroupedClassTest + class GroupedClassTest first class inside of namespace Public Functions - virtual void publicFunction const = 0 + virtual void publicFunction const = 0 namespaced class function @@ -32,14 +32,14 @@ - class PublicClass + class PublicClass A protected class. - class UndocumentedPublicClass + class UndocumentedPublicClass @@ -48,19 +48,19 @@ - group innergroup + group innergroup This is an inner group. - class InnerGroupClassTest + class InnerGroupClassTest inner class inside of namespace Public Functions - inline void function + inline void function inner namespaced class function diff --git a/tests/data/examples/test_group_content_only/compare.xml b/tests/data/examples/test_group_content_only/compare.xml index 25d27d92..f97acfbd 100644 --- a/tests/data/examples/test_group_content_only/compare.xml +++ b/tests/data/examples/test_group_content_only/compare.xml @@ -3,7 +3,7 @@ - struct Structy + struct Structy Hello. diff --git a/tests/data/examples/test_headings/compare.xml b/tests/data/examples/test_headings/compare.xml index 7ab8ba66..1e23289b 100644 --- a/tests/data/examples/test_headings/compare.xml +++ b/tests/data/examples/test_headings/compare.xml @@ -3,7 +3,7 @@ - class HeadingsTest + class HeadingsTest This is a documentation. This is more documentation. diff --git a/tests/data/examples/test_image/compare.xml b/tests/data/examples/test_image/compare.xml index 4b912e30..b0b1a12b 100644 --- a/tests/data/examples/test_image/compare.xml +++ b/tests/data/examples/test_image/compare.xml @@ -3,7 +3,7 @@ - class ImageClass + class ImageClass This is a class with an image in the description. It renders like this: diff --git a/tests/data/examples/test_index/compare.xml b/tests/data/examples/test_index/compare.xml index fa46155a..b7a6d2b5 100644 --- a/tests/data/examples/test_index/compare.xml +++ b/tests/data/examples/test_index/compare.xml @@ -72,7 +72,7 @@ - template<typename T>concept N + template<typename T>concept N @@ -107,7 +107,7 @@ - file index.h + file index.h Defines diff --git a/tests/data/examples/test_inheritance/compare.xml b/tests/data/examples/test_inheritance/compare.xml index c9480e0e..bf8530f2 100644 --- a/tests/data/examples/test_inheritance/compare.xml +++ b/tests/data/examples/test_inheritance/compare.xml @@ -3,21 +3,21 @@ - class BaseA + class BaseA Subclassed by ChildV1, ChildV2, Main - class BaseB + class BaseB Subclassed by Main - class Main : public BaseA, private BaseB + class Main : public BaseA, private BaseB This is the main class we’re interested in. 
Subclassed by ChildA, ChildB @@ -25,33 +25,33 @@ - class ChildA : public Main + class ChildA : public Main - class ChildB : public Main + class ChildB : public Main - class ChildV1 : public virtual BaseA + class ChildV1 : public virtual BaseA Subclassed by ChildV3 - class ChildV2 : public virtual BaseA + class ChildV2 : public virtual BaseA Subclassed by ChildV3 - class ChildV3 : public ChildV1, private ChildV2 + class ChildV3 : public ChildV1, private ChildV2 diff --git a/tests/data/examples/test_inline/compare.xml b/tests/data/examples/test_inline/compare.xml index 2faf049d..3b6db6e1 100644 --- a/tests/data/examples/test_inline/compare.xml +++ b/tests/data/examples/test_inline/compare.xml @@ -3,14 +3,14 @@ - class InlineTest + class InlineTest A class to demonstrate inline documentation syntax. Public Functions - const char *memberchar cint n + const char *memberchar cint n A member function. Details about member function diff --git a/tests/data/examples/test_latexmath/compare.xml b/tests/data/examples/test_latexmath/compare.xml index 21cc94ac..424e6cc3 100644 --- a/tests/data/examples/test_latexmath/compare.xml +++ b/tests/data/examples/test_latexmath/compare.xml @@ -3,7 +3,7 @@ - class MathHelper + class MathHelper A class. A inline formula: f(x) = a + b diff --git a/tests/data/examples/test_links/compare.xml b/tests/data/examples/test_links/compare.xml index 32ddb67f..2e42e7a2 100644 --- a/tests/data/examples/test_links/compare.xml +++ b/tests/data/examples/test_links/compare.xml @@ -3,7 +3,7 @@ - class LinksTest + class LinksTest first struct inside of namespace This is a longer description with a link to a webpage in the text http://www.github.com in order to test out Breathe’s handling of links. diff --git a/tests/data/examples/test_lists/compare.xml b/tests/data/examples/test_lists/compare.xml index e45d6f29..cd1d2047 100644 --- a/tests/data/examples/test_lists/compare.xml +++ b/tests/data/examples/test_lists/compare.xml @@ -3,7 +3,7 @@ - class SimpleList_1 + class SimpleList_1 This is a list example. Following is a list using ‘+’ for bullets:One item.Two items.Three items.Four. @@ -12,7 +12,7 @@ - class SimpleList_2 + class SimpleList_2 This is a list example. Following is a list using ‘-’ for bullets:One item.Two items.Three items.Four. @@ -21,7 +21,7 @@ - class SimpleList_3 + class SimpleList_3 This is a list example. Following is a list using ‘*’ for bullets:One item.Two items.Three items.Four. @@ -30,7 +30,7 @@ - class SimpleList_4 + class SimpleList_4 This is a list example. Following is an auto-numbered list:One item.Two items.Three items.Four. @@ -39,7 +39,7 @@ - class SimpleList_5 + class SimpleList_5 This is a list example. Following is a numbered list:One item.Two items.Three items.Four. @@ -48,7 +48,7 @@ - class SimpleList_6 + class SimpleList_6 This is a list example. Following is an unordered list using ‘HTML’ tags: One item. Two items. Three items. Four. @@ -57,7 +57,7 @@ - class NestedLists_1 + class NestedLists_1 A list of events: mouse eventsmouse move eventmouse click eventMore info about the click event.mouse double click eventkeyboard eventskey down eventkey up event @@ -66,7 +66,7 @@ - class NestedLists_2 + class NestedLists_2 Text before the list. list item 1sub item 1sub sub item 1sub sub item 2 @@ -78,7 +78,7 @@ - class NestedLists_3 + class NestedLists_3 A list of events: mouse events mouse move event mouse click eventMore info about the click event.mouse double click event keyboard events key down event key up event More text here. 
@@ -86,7 +86,7 @@ - class NestedLists_4 + class NestedLists_4 A list of events: mouse eventsmouse move eventswipe eventcircle eventwave eventmouse click eventMore info about the click event.mouse double click eventkeyboard eventskey down eventkey up eventtouch eventspinch eventswipe event More text here. @@ -94,7 +94,7 @@ - class NestedLists_5 + class NestedLists_5 A deeply nested list of events: mouse eventsmouse move eventswipe eventswipe leftswipe rightcircle eventwave eventmouse click eventMore info about the click event.mouse double click eventkeyboard eventskey down eventkey up eventtouch eventspinch eventswipe event More text here. diff --git a/tests/data/examples/test_membergroups/compare.xml b/tests/data/examples/test_membergroups/compare.xml index 9bf2285e..4227d3ae 100644 --- a/tests/data/examples/test_membergroups/compare.xml +++ b/tests/data/examples/test_membergroups/compare.xml @@ -3,21 +3,21 @@ - class GroupedMembers + class GroupedMembers demonstrates member groups myGroup - void in_mygroup_oneint myParameter + void in_mygroup_oneint myParameter A function. - void in_mygroup_twoint myParameter + void in_mygroup_twoint myParameter Another function. @@ -27,7 +27,7 @@ Public Functions - void not_in_mygroupint myParameter + void not_in_mygroupint myParameter This one is not in myGroup. diff --git a/tests/data/examples/test_param_dirs/compare.xml b/tests/data/examples/test_param_dirs/compare.xml index ea412997..71c57967 100644 --- a/tests/data/examples/test_param_dirs/compare.xml +++ b/tests/data/examples/test_param_dirs/compare.xml @@ -3,7 +3,7 @@ - int processvoid *ivoid *ovoid *io + int processvoid *ivoid *ovoid *io diff --git a/tests/data/examples/test_python/compare.xml b/tests/data/examples/test_python/compare.xml index dbb64780..553c1a81 100644 --- a/tests/data/examples/test_python/compare.xml +++ b/tests/data/examples/test_python/compare.xml @@ -3,7 +3,7 @@ - module pyexample + module pyexample Documentation for this module. More details. @@ -11,7 +11,7 @@ Functions - func + func Documentation for a function. More details. @@ -20,7 +20,7 @@ - class PyClass + class PyClass Documentation for a class. More details. @@ -28,14 +28,14 @@ Public Functions - __init__self + __init__self The constructor. - PyMethodself + PyMethodself Documentation for a method. @@ -53,7 +53,7 @@ Public Static Attributes - classVar = 0 + classVar = 0 A class variable. 
@@ -63,7 +63,7 @@ Protected Attributes - _memVar + _memVar a member variable diff --git a/tests/data/examples/test_qtsignalsandslots/compare.xml b/tests/data/examples/test_qtsignalsandslots/compare.xml index 7b9bcdef..def81917 100644 --- a/tests/data/examples/test_qtsignalsandslots/compare.xml +++ b/tests/data/examples/test_qtsignalsandslots/compare.xml @@ -3,13 +3,13 @@ - class QtSignalSlotExample : public QObject + class QtSignalSlotExample : public QObject Public Functions - inline void workingFunctionint iShownParameter + inline void workingFunctionint iShownParameter @@ -26,7 +26,7 @@ Public Slots - inline void workingSlotint iShown + inline void workingSlotint iShown @@ -43,7 +43,7 @@ Signals - void workingSignalint iShown + void workingSignalint iShown diff --git a/tests/data/examples/test_rst/compare.xml b/tests/data/examples/test_rst/compare.xml index 1633fed0..d05d0d8e 100644 --- a/tests/data/examples/test_rst/compare.xml +++ b/tests/data/examples/test_rst/compare.xml @@ -3,14 +3,14 @@ - class TestClass + class TestClass first class inside of namespace Public Functions - virtual void function const = 0 + virtual void function const = 0 Inserting additional reStructuredText information. This is some funky non-XML compliant text: <& !>< @@ -24,7 +24,7 @@ - virtual void rawVerbatim const = 0 + virtual void rawVerbatim const = 0 Inserting additional reStructuredText information. @@ -34,7 +34,7 @@ - virtual void rawLeadingAsteriskVerbatim const = 0 + virtual void rawLeadingAsteriskVerbatim const = 0 Inserting additional reStructuredText information. Some example code:int example(int x) { @@ -44,7 +44,7 @@ - virtual void rawLeadingSlashesVerbatimint something const = 0 + virtual void rawLeadingSlashesVerbatimint something const = 0 Some kind of method. bool foo(bool something) { @@ -65,14 +65,14 @@ - virtual void rawInlineVerbatim const = 0 + virtual void rawInlineVerbatim const = 0 Inserting an inline reStructuredText snippet. Linking to another function: TestClass::rawVerbatim() - inline virtual void testFunction const + inline virtual void testFunction const Brief description. diff --git a/tests/data/examples/test_simplesect/compare.xml b/tests/data/examples/test_simplesect/compare.xml index 139e4d8e..b0da1155 100644 --- a/tests/data/examples/test_simplesect/compare.xml +++ b/tests/data/examples/test_simplesect/compare.xml @@ -3,7 +3,7 @@ - template<typename T1, typename T2>void fint afloat bstd::string c + template<typename T1, typename T2>void fint afloat bstd::string c see, f_raw sa, f_raw Remarkremark, 1 Remarkremark, 2 Remarkremarks, 1 Remarkremarks, 2 par, something diff --git a/tests/data/examples/test_tables/compare.xml b/tests/data/examples/test_tables/compare.xml index 767fbc86..a883b4d7 100644 --- a/tests/data/examples/test_tables/compare.xml +++ b/tests/data/examples/test_tables/compare.xml @@ -3,7 +3,7 @@ - class Table_1 + class Table_1 This is a simple Markdown table example. Following is a simple table using Markdown syntax. @@ -13,7 +13,7 @@ - class Table_2 + class Table_2 This is a Markdown table with alignment. Following is a table with alignment using Markdown syntax. @@ -23,7 +23,7 @@ - class Table_3 + class Table_3 This is a Markdown table with rowspan and alignment. Following is a table with rowspan and alignment using Markdown syntax. @@ -33,7 +33,7 @@ - class Table_4 + class Table_4 This is a Markdown table with colspan and alignment. Following is a table with colspan and alignment using Markdown syntax. 
@@ -43,7 +43,7 @@ - class Table_5 + class Table_5 This is a Doxygen table. Following is a table using Doxygen syntax (and all supported features). diff --git a/tests/data/examples/test_template_class_non_type/compare.xml b/tests/data/examples/test_template_class_non_type/compare.xml index efa1afc7..fae79d38 100644 --- a/tests/data/examples/test_template_class_non_type/compare.xml +++ b/tests/data/examples/test_template_class_non_type/compare.xml @@ -3,7 +3,7 @@ - template<typename T, typename U, int N>class anothertemplateclass + template<typename T, typename U, int N>class anothertemplateclass a class with three template parameters @@ -28,14 +28,14 @@ Public Functions - inline anothertemplateclass + inline anothertemplateclass default constructor - inline anothertemplateclassT const &m1U const &m2 + inline anothertemplateclassT const &m1U const &m2 constructor with two template argument @@ -57,7 +57,7 @@ - U methodT const &t + U methodT const &t member accepting template argument and returning template argument diff --git a/tests/data/examples/test_template_function/compare.xml b/tests/data/examples/test_template_function/compare.xml index ea76b225..2f5b7c4f 100644 --- a/tests/data/examples/test_template_function/compare.xml +++ b/tests/data/examples/test_template_function/compare.xml @@ -5,7 +5,7 @@ Functions - template<typename T>T function1T arg1 + template<typename T>T function1T arg1 a function with one template arguments @@ -32,7 +32,7 @@ - template<>std::string function1<std::string>std::string arg1 + template<>std::string function1<std::string>std::string arg1 a function with one template argument specialized for std::string @@ -53,7 +53,7 @@ - template<typename T, typename U, int N>T function2T arg1U arg2 + template<typename T, typename U, int N>T function2T arg1U arg2 a function with three template arguments @@ -97,7 +97,7 @@ - template<typename T = void, typename, int>void function3 + template<typename T = void, typename, int>void function3 a function with unnamed arguments and an argument with a default value diff --git a/tests/data/examples/test_template_type_alias/compare.xml b/tests/data/examples/test_template_type_alias/compare.xml index 4623cb2c..d7c6bb36 100644 --- a/tests/data/examples/test_template_type_alias/compare.xml +++ b/tests/data/examples/test_template_type_alias/compare.xml @@ -5,7 +5,7 @@ Typedefs - template<typename T>using IsFuzzy = std::is_fuzzy<T> + template<typename T>using IsFuzzy = std::is_fuzzy<T> a type alias with one template argument @@ -20,7 +20,7 @@ - template<typename T, typename U, int N>using IsFurry = std::is_furry<T, U, N> + template<typename T, typename U, int N>using IsFurry = std::is_furry<T, U, N> a type alias with three template arguments diff --git a/tests/data/examples/test_union/compare.xml b/tests/data/examples/test_union/compare.xml index 34973b58..15950980 100644 --- a/tests/data/examples/test_union/compare.xml +++ b/tests/data/examples/test_union/compare.xml @@ -3,21 +3,21 @@ - union SeparateUnion + union SeparateUnion A union of two values. Public Members - int size + int size The size of the thing. - float depth + float depth How deep it is. @@ -27,25 +27,25 @@ - namespace foo + namespace foo - union MyUnion + union MyUnion A union of two values. Public Members - int someInt + int someInt The int of it all. - float someFloat + float someFloat The float side of things. 
diff --git a/tests/data/examples/test_userdefined/compare.xml b/tests/data/examples/test_userdefined/compare.xml index 63135fa5..008da03e 100644 --- a/tests/data/examples/test_userdefined/compare.xml +++ b/tests/data/examples/test_userdefined/compare.xml @@ -3,7 +3,7 @@ - class UserDefinedGroupTest + class UserDefinedGroupTest A class. More details about the UserDefinedGroupTest class @@ -12,7 +12,7 @@ Description of custom group - void func1InCustomGroup + void func1InCustomGroup Function 1 in custom group. Details. @@ -20,7 +20,7 @@ - void func2InCustomGroup + void func2InCustomGroup Function 2 in custom group. Details. @@ -31,7 +31,7 @@ Public Functions - void func1InGroup1 + void func1InGroup1 Same documentation for both members. Details @@ -39,7 +39,7 @@ - void ungroupedFunction + void ungroupedFunction Function without group. Details. diff --git a/tests/data/examples/test_xrefsect/compare.xml b/tests/data/examples/test_xrefsect/compare.xml index f8cb432d..3e6a3e51 100644 --- a/tests/data/examples/test_xrefsect/compare.xml +++ b/tests/data/examples/test_xrefsect/compare.xml @@ -6,7 +6,7 @@ Functions - int unimplementedvoid + int unimplementedvoid An example of using Doxygen’s todo command. Todo:Implement this function. @@ -14,7 +14,7 @@ - void buggy_functionint param + void buggy_functionint param An example of using Doxygen’s bug and test commands. Bug:Does not work yet. @@ -23,7 +23,7 @@ - void old_functionvoid + void old_functionvoid An example of using Doxygen’s deprecated command. Deprecated:Should not be used on new code. @@ -31,7 +31,7 @@ - void sample_xrefitem_functionvoid + void sample_xrefitem_functionvoid An example of a custom Doxygen xrefitem declared as an ALIAS. xref Sample:This text shows up in the xref output. From d7f774662b4877d98d32b37aa2b27745665e1e50 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 13 Dec 2023 06:20:34 -0500 Subject: [PATCH 38/65] Better caching --- .github/workflows/cache_doxygen.yml | 36 +++++++++++++++++++++++++++++ .github/workflows/documentation.yml | 32 ++++++++++++------------- .github/workflows/unit_tests.yml | 28 +++++++++++----------- 3 files changed, 65 insertions(+), 31 deletions(-) create mode 100644 .github/workflows/cache_doxygen.yml diff --git a/.github/workflows/cache_doxygen.yml b/.github/workflows/cache_doxygen.yml new file mode 100644 index 00000000..ac9ed939 --- /dev/null +++ b/.github/workflows/cache_doxygen.yml @@ -0,0 +1,36 @@ +name: download and compile Doxygen +on: + workflow_call +env: + DOXYGEN_VERSION: 1.9.4 +jobs: + install: + runs-on: ubuntu-latest + concurrency: + group: linux-doxygen-cache + steps: + - uses: actions/cache/restore@v3 + id: cache-doxygen + with: + path: doxygen-bin-arc + key: ${{ runner.os }}-doxygen-${{ env.DOXYGEN_VERSION }} + lookup-only: true + restore-keys: | + ${{ runner.os }}-doxygen- + + - name: download and build Doxygen from SF binary archives + if: steps.cache-doxygen.outputs.cache-hit != 'true' + run: | + mkdir doxygen-bin-arc && cd doxygen-bin-arc + curl -L https://sourceforge.net/projects/doxygen/files/rel-$DOXYGEN_VERSION/doxygen-$DOXYGEN_VERSION.linux.bin.tar.gz > doxygen.tar.gz + gunzip doxygen.tar.gz + tar xf doxygen.tar + mv doxygen-$DOXYGEN_VERSION doxygen + cd doxygen + make + + - uses: actions/cache/save@v3 + if: steps.cache-doxygen.outputs.cache-hit != 'true' + with: + path: doxygen-bin-arc + key: ${{ steps.cache-doxygen.outputs.cache-primary-key }} diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 81ff9576..10eab725 
100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -1,8 +1,15 @@ name: build the documentation on: [push, pull_request] +env: + DOXYGEN_VERSION: 1.9.4 jobs: - build: + cache-doxygen: + runs-on: ubuntu-latest + steps: + - uses: ./.github/workflows/cache_doxygen.yml + build: + needs: cache-doxygen runs-on: ubuntu-latest steps: @@ -22,25 +29,18 @@ jobs: pip install -r requirements/development.txt pip install --editable . - - name: cache Doxygen + - uses: actions/cache/restore@v3 id: cache-doxygen - uses: actions/cache@v3 with: - path: doxygen-bin-arc/doxygen.tar.gz - key: ${{ runner.os }}-doxygen - - - name: download Doxygen from SF binary archives - if: steps.cache-doxygen.outputs.cache-hit != 'true' - run: | - mkdir doxygen-bin-arc - curl -L https://sourceforge.net/projects/doxygen/files/rel-$DOXYGEN_VERSION/doxygen-$DOXYGEN_VERSION.linux.bin.tar.gz > doxygen-bin-arc/doxygen.tar.gz - + path: doxygen-bin-arc + fail-on-cache-miss: true + key: ${{ runner.os }}-doxygen-${{ env.DOXYGEN_VERSION }} + restore-keys: | + ${{ runner.os }}-doxygen- + - name: install Doxygen run: | - cd doxygen-bin-arc - gunzip doxygen.tar.gz - tar xf doxygen.tar - cd doxygen-$DOXYGEN_VERSION + cd doxygen-bin-arc/doxygen sudo make install - name: build the documentation diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 223ff0f9..459151b7 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -3,8 +3,13 @@ on: [push, pull_request] env: DOXYGEN_VERSION: 1.9.4 jobs: - build: + cache-doxygen: + runs-on: ubuntu-latest + steps: + - uses: ./.github/workflows/cache_doxygen.yml + build: + needs: cache-doxygen runs-on: ubuntu-latest strategy: fail-fast: false @@ -56,25 +61,18 @@ jobs: pip install -Iv Sphinx==${{ matrix.sphinx-version }} fi - - name: cache Doxygen + - uses: actions/cache/restore@v3 id: cache-doxygen - uses: actions/cache@v3 with: - path: doxygen-bin-arc/doxygen.tar.gz - key: ${{ runner.os }}-doxygen - - - name: download Doxygen from SF binary archives - if: steps.cache-doxygen.outputs.cache-hit != 'true' - run: | - mkdir doxygen-bin-arc - curl -L https://sourceforge.net/projects/doxygen/files/rel-$DOXYGEN_VERSION/doxygen-$DOXYGEN_VERSION.linux.bin.tar.gz > doxygen-bin-arc/doxygen.tar.gz + path: doxygen-bin-arc + fail-on-cache-miss: true + key: ${{ runner.os }}-doxygen-${{ env.DOXYGEN_VERSION }} + restore-keys: | + ${{ runner.os }}-doxygen- - name: install Doxygen run: | - cd doxygen-bin-arc - gunzip doxygen.tar.gz - tar xf doxygen.tar - cd doxygen-$DOXYGEN_VERSION + cd doxygen-bin-arc/doxygen sudo make install - name: install dependencies and build extension module From 2f27528d50fe689336a81820e502810f723cd425 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 13 Dec 2023 06:24:22 -0500 Subject: [PATCH 39/65] Fixed mistake in actions --- .github/workflows/documentation.yml | 4 +--- .github/workflows/unit_tests.yml | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 10eab725..bcf7751c 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -4,9 +4,7 @@ env: DOXYGEN_VERSION: 1.9.4 jobs: cache-doxygen: - runs-on: ubuntu-latest - steps: - - uses: ./.github/workflows/cache_doxygen.yml + uses: ./.github/workflows/cache_doxygen.yml build: needs: cache-doxygen diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 459151b7..93d844df 
100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -4,9 +4,7 @@ env: DOXYGEN_VERSION: 1.9.4 jobs: cache-doxygen: - runs-on: ubuntu-latest - steps: - - uses: ./.github/workflows/cache_doxygen.yml + uses: ./.github/workflows/cache_doxygen.yml build: needs: cache-doxygen From cdb8564f5db0d34e1de98f91639a6ea0d94c9c1c Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 13 Dec 2023 06:34:24 -0500 Subject: [PATCH 40/65] Another attempt at Github actions --- .github/workflows/cache_doxygen.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/cache_doxygen.yml b/.github/workflows/cache_doxygen.yml index ac9ed939..6c36ad8d 100644 --- a/.github/workflows/cache_doxygen.yml +++ b/.github/workflows/cache_doxygen.yml @@ -18,7 +18,7 @@ jobs: restore-keys: | ${{ runner.os }}-doxygen- - - name: download and build Doxygen from SF binary archives + - name: download Doxygen from SF binary archives if: steps.cache-doxygen.outputs.cache-hit != 'true' run: | mkdir doxygen-bin-arc && cd doxygen-bin-arc @@ -26,8 +26,6 @@ jobs: gunzip doxygen.tar.gz tar xf doxygen.tar mv doxygen-$DOXYGEN_VERSION doxygen - cd doxygen - make - uses: actions/cache/save@v3 if: steps.cache-doxygen.outputs.cache-hit != 'true' From 7f48ddb2591f522fefed78eb1bb9d7ecd6d1ee87 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Thu, 14 Dec 2023 19:26:14 -0500 Subject: [PATCH 41/65] Made tests compatible with older packages and Doxygen versions --- setup.cfg | 1 - setup.py | 14 ++- tests/conftest.py | 3 +- tests/data/auto/compare.xml | 10 +-- tests/data/examples/test_alias/compare.xml | 2 +- tests/data/examples/test_array/compare.xml | 4 +- tests/data/examples/test_c_enum/compare.xml | 42 ++++----- tests/data/examples/test_c_file/compare.xml | 38 ++++---- tests/data/examples/test_class/compare.xml | 80 ++++++++--------- .../examples/test_code_blocks/compare.xml | 6 +- .../examples/test_cpp_concept/compare.xml | 2 +- tests/data/examples/test_cpp_enum/compare.xml | 16 ++-- .../examples/test_cpp_friendclass/compare.xml | 6 +- .../examples/test_cpp_function/compare.xml | 20 ++--- .../test_cpp_inherited_members/compare.xml | 12 +-- .../test_cpp_trailing_return_type/compare.xml | 4 +- tests/data/examples/test_define/compare.xml | 8 +- tests/data/examples/test_group/compare.xml | 14 +-- .../test_group_content_only/compare.xml | 2 +- tests/data/examples/test_headings/compare.xml | 2 +- tests/data/examples/test_image/compare.xml | 4 +- tests/data/examples/test_index/compare.xml | 86 +++++++++--------- .../examples/test_inheritance/compare.xml | 16 ++-- tests/data/examples/test_inline/compare.xml | 4 +- .../data/examples/test_latexmath/compare.xml | 2 +- tests/data/examples/test_links/compare.xml | 2 +- tests/data/examples/test_lists/compare.xml | 22 ++--- .../examples/test_membergroups/compare.xml | 8 +- .../data/examples/test_param_dirs/compare.xml | 2 +- .../examples/test_python/compare-1.9.6.xml | 76 ++++++++++++++++ tests/data/examples/test_python/compare.xml | 18 ++-- .../test_qtsignalsandslots/compare.xml | 8 +- tests/data/examples/test_rst/compare.xml | 14 +-- .../data/examples/test_simplesect/compare.xml | 2 +- tests/data/examples/test_tables/compare.xml | 10 +-- .../test_template_class_non_type/compare.xml | 8 +- .../test_template_function/compare.xml | 8 +- .../test_template_type_alias/compare.xml | 4 +- tests/data/examples/test_union/compare.xml | 14 +-- .../examples/test_userdefined/compare.xml | 10 +-- tests/data/examples/test_xrefsect/compare.xml | 8 +- 
tests/test_examples.py | 88 +++++++++++++++---- xml_parser_generator/module_template.c.in | 3 +- 43 files changed, 413 insertions(+), 290 deletions(-) create mode 100644 tests/data/examples/test_python/compare-1.9.6.xml diff --git a/setup.cfg b/setup.cfg index 44b9b1a6..6da0f658 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,7 +3,6 @@ license_files = LICENSE [flake8] max-line-length = 100 -exclude = compoundsuper.py,indexsuper.py extend-ignore = E203, E231 per-file-ignores = breathe/parser/index.py:E305 diff --git a/setup.py b/setup.py index 7f7dd29c..17da7530 100644 --- a/setup.py +++ b/setup.py @@ -1,18 +1,14 @@ # -*- coding: utf-8 -*- -try: - from setuptools import setup, find_packages, Extension -except ImportError: - import distribute_setup - - distribute_setup.use_setuptools() - from setuptools import setup, find_packages, Extension - import sys import os.path +from setuptools import setup, find_packages, Extension from setuptools.command.build import build from setuptools.command.build_ext import build_ext +try: + from setuptools.dep_util import newer_group +except ImportError: + from distutils.dep_util import newer_group from distutils import log -from distutils.dep_util import newer_group from distutils.dir_util import mkpath from distutils.util import split_quoted diff --git a/tests/conftest.py b/tests/conftest.py index c8ea8403..267a701b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,4 @@ +import pathlib import pytest from sphinx.testing.fixtures import ( test_params, @@ -16,7 +17,7 @@ def app(test_params, app_params, make_app, shared_result): """ args, kwargs = app_params assert "srcdir" in kwargs - kwargs["srcdir"].mkdir(parents=True, exist_ok=True) + pathlib.Path(kwargs["srcdir"]).mkdir(parents=True, exist_ok=True) (kwargs["srcdir"] / "conf.py").write_text("") app_ = make_app(*args, **kwargs) yield app_ diff --git a/tests/data/auto/compare.xml b/tests/data/auto/compare.xml index ae522088..aa7913a2 100644 --- a/tests/data/auto/compare.xml +++ b/tests/data/auto/compare.xml @@ -3,21 +3,21 @@ - class AutoClassTest + class AutoClassTest class outside of namespace Private Functions - inline void member + inline void member non-namespaced class function - inline void anotherMember + inline void anotherMember non-namespaced class other function @@ -29,14 +29,14 @@ Functions - void autoFunction + void autoFunction non-namespaced class function - void anotherAutoFunction + void anotherAutoFunction non-namespaced class other function diff --git a/tests/data/examples/test_alias/compare.xml b/tests/data/examples/test_alias/compare.xml index a24fbd7e..5586b93b 100644 --- a/tests/data/examples/test_alias/compare.xml +++ b/tests/data/examples/test_alias/compare.xml @@ -5,7 +5,7 @@ Functions - void frob_foosvoid *Frobs + void frob_foosvoid *Frobs Foo frob routine. bob this something elseSide EffectsFrobs any foos.bob this something elseSide EffectsFrobs any foos. diff --git a/tests/data/examples/test_array/compare.xml b/tests/data/examples/test_array/compare.xml index c978b27e..dbabe685 100644 --- a/tests/data/examples/test_array/compare.xml +++ b/tests/data/examples/test_array/compare.xml @@ -3,14 +3,14 @@ - int fooint a[5] + int fooint a[5] My function. - int barint nint a[] + int barint nint a[] My other function. 
Test:This declaration is supposed to be int bar(int n, int a[static n]); But, Sphinx fails to recognize int a[static n]) as a C specific array syntax diff --git a/tests/data/examples/test_c_enum/compare.xml b/tests/data/examples/test_c_enum/compare.xml index 4b90ff00..5a1e0490 100644 --- a/tests/data/examples/test_c_enum/compare.xml +++ b/tests/data/examples/test_c_enum/compare.xml @@ -3,13 +3,13 @@ - enum GSM_BackupFormat + enum GSM_BackupFormat Backup data. Values: - enumerator GSM_Backup_Auto + enumerator GSM_Backup_Auto Compatibility with old gboolean used instead of format. File type is guessed for extension, non unicode format used for Gammu backup. @@ -17,7 +17,7 @@ - enumerator GSM_Backup_AutoUnicode + enumerator GSM_Backup_AutoUnicode Compatibility with old gboolean used instead of format. File type is guessed for extension, unicode format used for Gammu backup. @@ -25,42 +25,42 @@ - enumerator GSM_Backup_LMB + enumerator GSM_Backup_LMB LMB format, compatible with Logo manager, can store phonebooks and logos. - enumerator GSM_Backup_VCalendar + enumerator GSM_Backup_VCalendar vCalendar standard, can store todo and calendar entries. - enumerator GSM_Backup_VCard + enumerator GSM_Backup_VCard vCard standard, can store phone phonebook entries. - enumerator GSM_Backup_LDIF + enumerator GSM_Backup_LDIF LDIF (LDAP Data Interchange Format), can store phone phonebook entries. - enumerator GSM_Backup_ICS + enumerator GSM_Backup_ICS iCalendar standard, can store todo and calendar entries. - enumerator GSM_Backup_Gammu + enumerator GSM_Backup_Gammu Gammu own format can store almost anything from phone. This is ASCII version of the format, Unicode strings are HEX encoded. Use GSM_Backup_GammuUCS2 instead if possible. @@ -68,7 +68,7 @@ - enumerator GSM_Backup_GammuUCS2 + enumerator GSM_Backup_GammuUCS2 Gammu own format can store almost anything from phone. This is UCS2-BE version of the format. @@ -76,7 +76,7 @@ - enumerator GSM_Backup_VNote + enumerator GSM_Backup_VNote vNote standard, can store phone notes. @@ -85,7 +85,7 @@ - enumerator GSM_Backup_Auto + enumerator GSM_Backup_Auto Compatibility with old gboolean used instead of format. File type is guessed for extension, non unicode format used for Gammu backup. @@ -93,7 +93,7 @@ - enumerator GSM_Backup_AutoUnicode + enumerator GSM_Backup_AutoUnicode Compatibility with old gboolean used instead of format. File type is guessed for extension, unicode format used for Gammu backup. @@ -101,42 +101,42 @@ - enumerator GSM_Backup_LMB + enumerator GSM_Backup_LMB LMB format, compatible with Logo manager, can store phonebooks and logos. - enumerator GSM_Backup_VCalendar + enumerator GSM_Backup_VCalendar vCalendar standard, can store todo and calendar entries. - enumerator GSM_Backup_VCard + enumerator GSM_Backup_VCard vCard standard, can store phone phonebook entries. - enumerator GSM_Backup_LDIF + enumerator GSM_Backup_LDIF LDIF (LDAP Data Interchange Format), can store phone phonebook entries. - enumerator GSM_Backup_ICS + enumerator GSM_Backup_ICS iCalendar standard, can store todo and calendar entries. - enumerator GSM_Backup_Gammu + enumerator GSM_Backup_Gammu Gammu own format can store almost anything from phone. This is ASCII version of the format, Unicode strings are HEX encoded. Use GSM_Backup_GammuUCS2 instead if possible. @@ -144,7 +144,7 @@ - enumerator GSM_Backup_GammuUCS2 + enumerator GSM_Backup_GammuUCS2 Gammu own format can store almost anything from phone. This is UCS2-BE version of the format. 
@@ -152,7 +152,7 @@ - enumerator GSM_Backup_VNote + enumerator GSM_Backup_VNote vNote standard, can store phone notes. diff --git a/tests/data/examples/test_c_file/compare.xml b/tests/data/examples/test_c_file/compare.xml index d379fa38..e927c073 100644 --- a/tests/data/examples/test_c_file/compare.xml +++ b/tests/data/examples/test_c_file/compare.xml @@ -5,33 +5,33 @@ Defines - WRITE_TREE_MISSING_OK + WRITE_TREE_MISSING_OK bitmasks to write_cache_as_tree flags - WRITE_TREE_IGNORE_CACHE_TREE + WRITE_TREE_IGNORE_CACHE_TREE - WRITE_TREE_UNREADABLE_INDEX + WRITE_TREE_UNREADABLE_INDEX error return codes - WRITE_TREE_UNMERGED_INDEX + WRITE_TREE_UNMERGED_INDEX - WRITE_TREE_PREFIX_ERROR + WRITE_TREE_PREFIX_ERROR @@ -40,67 +40,67 @@ Functions - struct cache_tree *cache_treevoid + struct cache_tree *cache_treevoid - void cache_tree_freestruct cache_tree** + void cache_tree_freestruct cache_tree** - void cache_tree_invalidate_pathstruct cache_tree*const char* + void cache_tree_invalidate_pathstruct cache_tree*const char* - struct cache_tree_sub *cache_tree_substruct cache_tree*const char* + struct cache_tree_sub *cache_tree_substruct cache_tree*const char* - void cache_tree_writestruct strbuf*struct cache_tree *root + void cache_tree_writestruct strbuf*struct cache_tree *root - struct cache_tree *cache_tree_readconst char *bufferunsigned long size + struct cache_tree *cache_tree_readconst char *bufferunsigned long size - int cache_tree_fully_validstruct cache_tree* + int cache_tree_fully_validstruct cache_tree* - int cache_tree_updatestruct cache_tree*struct cache_entry**intintint + int cache_tree_updatestruct cache_tree*struct cache_entry**intintint - int write_cache_as_treeunsigned char *sha1int flagsconst char *prefix + int write_cache_as_treeunsigned char *sha1int flagsconst char *prefix - void prime_cache_treestruct cache_tree**struct tree* + void prime_cache_treestruct cache_tree**struct tree* - int cache_tree_matches_traversalstruct cache_tree*struct name_entry *entstruct traverse_info *info + int cache_tree_matches_traversalstruct cache_tree*struct name_entry *entstruct traverse_info *info @@ -109,7 +109,7 @@ Variables - struct cache_tree global_cache_tree + struct cache_tree global_cache_tree Shared cache tree instance. @@ -117,13 +117,13 @@ - struct cache_tree_sub + struct cache_tree_sub - struct cache_tree + struct cache_tree diff --git a/tests/data/examples/test_class/compare.xml b/tests/data/examples/test_class/compare.xml index 840a0ba4..cc779ce4 100644 --- a/tests/data/examples/test_class/compare.xml +++ b/tests/data/examples/test_class/compare.xml @@ -5,25 +5,25 @@ Functions - template<typename T>void f0 + template<typename T>void f0 - template<>void f0<std::string> + template<>void f0<std::string> - class OuterClass + class OuterClass class outside of namespace - class InnerClass + class InnerClass inner class @@ -32,14 +32,14 @@ - class ClassTest + class ClassTest class outside of namespace Public Functions - void functionint myParameter + void functionint myParameter non-namespaced class function More details in the header file. 
@@ -48,7 +48,7 @@ - void anotherFunction + void anotherFunction non-namespaced class other function More documentation in the impl file @@ -56,14 +56,14 @@ - virtual void publicFunction const = 0 + virtual void publicFunction const = 0 namespaced class function - virtual void undocumentedPublicFunction const = 0 + virtual void undocumentedPublicFunction const = 0 @@ -72,14 +72,14 @@ Protected Functions - inline void protectedFunction + inline void protectedFunction A protected function. - inline void undocumentedProtectedFunction + inline void undocumentedProtectedFunction @@ -88,93 +88,93 @@ Private Functions - virtual void privateFunction const = 0 + virtual void privateFunction const = 0 This is a private function. - virtual void undocumentedPrivateFunction const = 0 + virtual void undocumentedPrivateFunction const = 0 - class PrivateClass + class PrivateClass A private class. - struct PrivateStruct + struct PrivateStruct A private struct. - class ProtectedClass + class ProtectedClass A protected class. - struct ProtectedStruct + struct ProtectedStruct A protected struct. - class PublicClass + class PublicClass A public class. - struct PublicStruct + struct PublicStruct A public struct. - class UndocumentedPrivateClass + class UndocumentedPrivateClass - struct UndocumentedPrivateStruct + struct UndocumentedPrivateStruct - class UndocumentedProtectedClass + class UndocumentedProtectedClass - struct UndocumentedProtectedStruct + struct UndocumentedProtectedStruct - class UndocumentedPublicClass + class UndocumentedPublicClass - struct UndocumentedPublicStruct + struct UndocumentedPublicStruct @@ -182,25 +182,25 @@ - namespace TestNamespaceClasses + namespace TestNamespaceClasses - class ClassTest + class ClassTest second class inside of namespace Public Functions - inline void function + inline void function second namespaced class function - inline void anotherFunction + inline void anotherFunction second namespaced class other function @@ -210,27 +210,27 @@ - class NamespacedClassTest + class NamespacedClassTest first class inside of namespace Public Functions - virtual void function const = 0 + virtual void function const = 0 namespaced class function - inline explicit NamespacedClassTest + inline explicit NamespacedClassTest - inline void anotherFunction + inline void anotherFunction namespaced class other function @@ -240,7 +240,7 @@ Public Static Functions - static void functionS + static void functionS @@ -251,38 +251,38 @@ - namespace NS1 + namespace NS1 Functions - template<typename T>void f1 + template<typename T>void f1 - template<>void f1<std::string> + template<>void f1<std::string> - namespace NS2 + namespace NS2 Functions - template<typename T>void f2 + template<typename T>void f2 - template<>void f2<std::string> + template<>void f2<std::string> diff --git a/tests/data/examples/test_code_blocks/compare.xml b/tests/data/examples/test_code_blocks/compare.xml index ded91eba..982f7771 100644 --- a/tests/data/examples/test_code_blocks/compare.xml +++ b/tests/data/examples/test_code_blocks/compare.xml @@ -5,7 +5,7 @@ Functions - void with_standard_code_block + void with_standard_code_block A function with an unannotated code block with C/C++ code. char* buffer = new char[42]; @@ -14,7 +14,7 @@ int charsAdded = sprintf(buffer, "Tabs are normally %d spaces\n", 8); - void with_unannotated_cmake_code_block + void with_unannotated_cmake_code_block A function with an unannotated code block with non-C/C++ code. 
set(user_list A B C) @@ -26,7 +26,7 @@ endforeach() - void with_annotated_cmake_code_block + void with_annotated_cmake_code_block A function with an annotated cmake code block. set(user_list A B C) diff --git a/tests/data/examples/test_cpp_concept/compare.xml b/tests/data/examples/test_cpp_concept/compare.xml index 4402ebcc..37837504 100644 --- a/tests/data/examples/test_cpp_concept/compare.xml +++ b/tests/data/examples/test_cpp_concept/compare.xml @@ -3,7 +3,7 @@ - template<typename T>concept Hashable + template<typename T>concept Hashable diff --git a/tests/data/examples/test_cpp_enum/compare.xml b/tests/data/examples/test_cpp_enum/compare.xml index 13fe8b02..bda0222c 100644 --- a/tests/data/examples/test_cpp_enum/compare.xml +++ b/tests/data/examples/test_cpp_enum/compare.xml @@ -3,12 +3,12 @@ - enum Unscoped + enum Unscoped Values: - enumerator UnscopedEnumerator + enumerator UnscopedEnumerator @@ -16,12 +16,12 @@ - enum class ScopedStruct : int + enum class ScopedStruct : int Values: - enumerator Enumerator + enumerator Enumerator @@ -29,12 +29,12 @@ - enum class ScopedClass : int + enum class ScopedClass : int Values: - enumerator Enumerator + enumerator Enumerator @@ -42,12 +42,12 @@ - enum class ScopedClassNoUnderlying + enum class ScopedClassNoUnderlying Values: - enumerator Enumerator + enumerator Enumerator diff --git a/tests/data/examples/test_cpp_friendclass/compare.xml b/tests/data/examples/test_cpp_friendclass/compare.xml index 2878d599..f4fa1de5 100644 --- a/tests/data/examples/test_cpp_friendclass/compare.xml +++ b/tests/data/examples/test_cpp_friendclass/compare.xml @@ -3,19 +3,19 @@ - struct A + struct A - struct B + struct B - struct C + struct C Friends diff --git a/tests/data/examples/test_cpp_function/compare.xml b/tests/data/examples/test_cpp_function/compare.xml index c5e7814e..c6415527 100644 --- a/tests/data/examples/test_cpp_function/compare.xml +++ b/tests/data/examples/test_cpp_function/compare.xml @@ -3,37 +3,37 @@ - struct Foo + struct Foo - struct Class + struct Class Public Functions - virtual void f1 volatile const & = 0 + virtual void f1 volatile const & = 0 - virtual void f2 volatile const && = 0 + virtual void f2 volatile const && = 0 - int f_issue_338 noexcept + int f_issue_338 noexcept - int anon_paramsintintint xchar* + int anon_paramsintintint xchar* @@ -42,7 +42,7 @@ Public Members - void (*f_issue_489)(struct Foo *foo, int value) + void (*f_issue_489)(struct Foo *foo, int value) @@ -51,7 +51,7 @@ Public Static Functions - static void f3 + static void f3 @@ -60,14 +60,14 @@ - namespace TestNamespaceFunction + namespace TestNamespaceFunction A namespace to demonstrate a namespaced function. Functions - void namespaceFunc + void namespaceFunc A function within a namspace. diff --git a/tests/data/examples/test_cpp_inherited_members/compare.xml b/tests/data/examples/test_cpp_inherited_members/compare.xml index 2f288029..d614114a 100644 --- a/tests/data/examples/test_cpp_inherited_members/compare.xml +++ b/tests/data/examples/test_cpp_inherited_members/compare.xml @@ -3,7 +3,7 @@ - class Base + class Base Base class. Subclassed by A, B @@ -11,7 +11,7 @@ Public Functions - void f_issue_356 + void f_issue_356 Base-class member function. @@ -21,14 +21,14 @@ - class A : public Base + class A : public Base Class A. Public Functions - void f_issue_356 + void f_issue_356 Base-class member function. @@ -38,14 +38,14 @@ - class B : public Base + class B : public Base Class B. Public Functions - void f_issue_356 + void f_issue_356 Base-class member function. 
diff --git a/tests/data/examples/test_cpp_trailing_return_type/compare.xml b/tests/data/examples/test_cpp_trailing_return_type/compare.xml index fcadc152..2e22ee05 100644 --- a/tests/data/examples/test_cpp_trailing_return_type/compare.xml +++ b/tests/data/examples/test_cpp_trailing_return_type/compare.xml @@ -5,7 +5,7 @@ Functions - auto f_issue_441 -> Thingy* + auto f_issue_441 -> Thingy* Function that creates a thingy. @@ -13,7 +13,7 @@ - class Thingy + class Thingy needed for references in global function return type diff --git a/tests/data/examples/test_define/compare.xml b/tests/data/examples/test_define/compare.xml index c6e75e9c..7a9edf8e 100644 --- a/tests/data/examples/test_define/compare.xml +++ b/tests/data/examples/test_define/compare.xml @@ -3,21 +3,21 @@ - USE_STUFF + USE_STUFF A simple define without a value. - MAX_LENGTH + MAX_LENGTH A define with a simple value. - MAXIMUMAB + MAXIMUMAB A define with some parameters. @@ -45,7 +45,7 @@ - SWAPAB + SWAPAB A define which spans multiple lines. diff --git a/tests/data/examples/test_group/compare.xml b/tests/data/examples/test_group/compare.xml index 08456c26..d22a6c28 100644 --- a/tests/data/examples/test_group/compare.xml +++ b/tests/data/examples/test_group/compare.xml @@ -9,7 +9,7 @@ Functions - void groupedFunction + void groupedFunction This function is in MyGroup. @@ -17,14 +17,14 @@ - class GroupedClassTest + class GroupedClassTest first class inside of namespace Public Functions - virtual void publicFunction const = 0 + virtual void publicFunction const = 0 namespaced class function @@ -32,14 +32,14 @@ - class PublicClass + class PublicClass A protected class. - class UndocumentedPublicClass + class UndocumentedPublicClass @@ -53,14 +53,14 @@ This is an inner group. - class InnerGroupClassTest + class InnerGroupClassTest inner class inside of namespace Public Functions - inline void function + inline void function inner namespaced class function diff --git a/tests/data/examples/test_group_content_only/compare.xml b/tests/data/examples/test_group_content_only/compare.xml index f97acfbd..992bd661 100644 --- a/tests/data/examples/test_group_content_only/compare.xml +++ b/tests/data/examples/test_group_content_only/compare.xml @@ -3,7 +3,7 @@ - struct Structy + struct Structy Hello. diff --git a/tests/data/examples/test_headings/compare.xml b/tests/data/examples/test_headings/compare.xml index 1e23289b..bb85d72d 100644 --- a/tests/data/examples/test_headings/compare.xml +++ b/tests/data/examples/test_headings/compare.xml @@ -3,7 +3,7 @@ - class HeadingsTest + class HeadingsTest This is a documentation. This is more documentation. diff --git a/tests/data/examples/test_image/compare.xml b/tests/data/examples/test_image/compare.xml index b0b1a12b..2c670e71 100644 --- a/tests/data/examples/test_image/compare.xml +++ b/tests/data/examples/test_image/compare.xml @@ -3,11 +3,11 @@ - class ImageClass + class ImageClass This is a class with an image in the description. It renders like this: - + Breathe & Sphinx should automatically copy the image from the doxygen output directory into the _images folder of the Sphinx output. 
diff --git a/tests/data/examples/test_index/compare.xml b/tests/data/examples/test_index/compare.xml index b7a6d2b5..7fd5d615 100644 --- a/tests/data/examples/test_index/compare.xml +++ b/tests/data/examples/test_index/compare.xml @@ -3,13 +3,13 @@ - struct E + struct E Public Members - char F + char F @@ -18,13 +18,13 @@ - template<typename T>class J + template<typename T>class J Public Members - T K[H] + T K[H] @@ -33,7 +33,7 @@ Friends - static inline friend bool operator==J jaJ jb + static inline friend bool operator==J jaJ jb @@ -42,13 +42,13 @@ - template<typename T>struct L + template<typename T>struct L Public Static Attributes - static constexpr bool M = false + static constexpr bool M = false @@ -57,13 +57,13 @@ - template<typename T>struct L<J<T>> + template<typename T>struct L<J<T>> Public Static Attributes - static constexpr bool M = true + static constexpr bool M = true @@ -72,19 +72,19 @@ - template<typename T>concept N + template<typename T>concept N - namespace G + namespace G Typedefs - typedef void (*O)(B) + typedef void (*O)(B) @@ -93,13 +93,13 @@ Variables - constexpr unsigned int H = 12 + constexpr unsigned int H = 12 - int I + int I @@ -113,7 +113,7 @@ Defines - P + P @@ -122,7 +122,7 @@ Typedefs - using B = long + using B = long @@ -131,18 +131,18 @@ Enums - enum Q + enum Q Values: - enumerator R + enumerator R - enumerator S + enumerator S @@ -153,13 +153,13 @@ Functions - auto Cauto x -> A + auto Cauto x -> A - template<typename T>void D + template<typename T>void D @@ -168,7 +168,7 @@ Variables - int A + int A @@ -179,7 +179,7 @@ Defines - P + P @@ -188,7 +188,7 @@ Typedefs - using B = long + using B = long @@ -197,18 +197,18 @@ Enums - enum Q + enum Q Values: - enumerator R + enumerator R - enumerator S + enumerator S @@ -219,13 +219,13 @@ Functions - auto Cauto x -> A + auto Cauto x -> A - template<typename T>void D + template<typename T>void D @@ -234,20 +234,20 @@ Variables - int A + int A - struct E + struct E Public Members - char F + char F @@ -256,13 +256,13 @@ - namespace G + namespace G Typedefs - typedef void (*O)(B) + typedef void (*O)(B) @@ -271,26 +271,26 @@ Variables - constexpr unsigned int H = 12 + constexpr unsigned int H = 12 - int I + int I - template<typename T>class J + template<typename T>class J Public Members - T K[H] + T K[H] @@ -299,7 +299,7 @@ Friends - static inline friend bool operator==J jaJ jb + static inline friend bool operator==J jaJ jb @@ -308,13 +308,13 @@ - template<typename T>struct L + template<typename T>struct L Public Static Attributes - static constexpr bool M = false + static constexpr bool M = false @@ -323,13 +323,13 @@ - template<typename T>struct L<J<T>> + template<typename T>struct L<J<T>> Public Static Attributes - static constexpr bool M = true + static constexpr bool M = true diff --git a/tests/data/examples/test_inheritance/compare.xml b/tests/data/examples/test_inheritance/compare.xml index bf8530f2..4a7345c3 100644 --- a/tests/data/examples/test_inheritance/compare.xml +++ b/tests/data/examples/test_inheritance/compare.xml @@ -3,21 +3,21 @@ - class BaseA + class BaseA Subclassed by ChildV1, ChildV2, Main - class BaseB + class BaseB Subclassed by Main - class Main : public BaseA, private BaseB + class Main : public BaseA, private BaseB This is the main class we’re interested in. 
Subclassed by ChildA, ChildB @@ -25,33 +25,33 @@ - class ChildA : public Main + class ChildA : public Main - class ChildB : public Main + class ChildB : public Main - class ChildV1 : public virtual BaseA + class ChildV1 : public virtual BaseA Subclassed by ChildV3 - class ChildV2 : public virtual BaseA + class ChildV2 : public virtual BaseA Subclassed by ChildV3 - class ChildV3 : public ChildV1, private ChildV2 + class ChildV3 : public ChildV1, private ChildV2 diff --git a/tests/data/examples/test_inline/compare.xml b/tests/data/examples/test_inline/compare.xml index 3b6db6e1..5c2387b0 100644 --- a/tests/data/examples/test_inline/compare.xml +++ b/tests/data/examples/test_inline/compare.xml @@ -3,14 +3,14 @@ - class InlineTest + class InlineTest A class to demonstrate inline documentation syntax. Public Functions - const char *memberchar cint n + const char *memberchar cint n A member function. Details about member function diff --git a/tests/data/examples/test_latexmath/compare.xml b/tests/data/examples/test_latexmath/compare.xml index 424e6cc3..2db33309 100644 --- a/tests/data/examples/test_latexmath/compare.xml +++ b/tests/data/examples/test_latexmath/compare.xml @@ -3,7 +3,7 @@ - class MathHelper + class MathHelper A class. A inline formula: f(x) = a + b diff --git a/tests/data/examples/test_links/compare.xml b/tests/data/examples/test_links/compare.xml index 2e42e7a2..2da83e6a 100644 --- a/tests/data/examples/test_links/compare.xml +++ b/tests/data/examples/test_links/compare.xml @@ -3,7 +3,7 @@ - class LinksTest + class LinksTest first struct inside of namespace This is a longer description with a link to a webpage in the text http://www.github.com in order to test out Breathe’s handling of links. diff --git a/tests/data/examples/test_lists/compare.xml b/tests/data/examples/test_lists/compare.xml index cd1d2047..43fe0027 100644 --- a/tests/data/examples/test_lists/compare.xml +++ b/tests/data/examples/test_lists/compare.xml @@ -3,7 +3,7 @@ - class SimpleList_1 + class SimpleList_1 This is a list example. Following is a list using ‘+’ for bullets:One item.Two items.Three items.Four. @@ -12,7 +12,7 @@ - class SimpleList_2 + class SimpleList_2 This is a list example. Following is a list using ‘-’ for bullets:One item.Two items.Three items.Four. @@ -21,7 +21,7 @@ - class SimpleList_3 + class SimpleList_3 This is a list example. Following is a list using ‘*’ for bullets:One item.Two items.Three items.Four. @@ -30,7 +30,7 @@ - class SimpleList_4 + class SimpleList_4 This is a list example. Following is an auto-numbered list:One item.Two items.Three items.Four. @@ -39,7 +39,7 @@ - class SimpleList_5 + class SimpleList_5 This is a list example. Following is a numbered list:One item.Two items.Three items.Four. @@ -48,7 +48,7 @@ - class SimpleList_6 + class SimpleList_6 This is a list example. Following is an unordered list using ‘HTML’ tags: One item. Two items. Three items. Four. @@ -57,7 +57,7 @@ - class NestedLists_1 + class NestedLists_1 A list of events: mouse eventsmouse move eventmouse click eventMore info about the click event.mouse double click eventkeyboard eventskey down eventkey up event @@ -66,7 +66,7 @@ - class NestedLists_2 + class NestedLists_2 Text before the list. list item 1sub item 1sub sub item 1sub sub item 2 @@ -78,7 +78,7 @@ - class NestedLists_3 + class NestedLists_3 A list of events: mouse events mouse move event mouse click eventMore info about the click event.mouse double click event keyboard events key down event key up event More text here. 
@@ -86,7 +86,7 @@ - class NestedLists_4 + class NestedLists_4 A list of events: mouse eventsmouse move eventswipe eventcircle eventwave eventmouse click eventMore info about the click event.mouse double click eventkeyboard eventskey down eventkey up eventtouch eventspinch eventswipe event More text here. @@ -94,7 +94,7 @@ - class NestedLists_5 + class NestedLists_5 A deeply nested list of events: mouse eventsmouse move eventswipe eventswipe leftswipe rightcircle eventwave eventmouse click eventMore info about the click event.mouse double click eventkeyboard eventskey down eventkey up eventtouch eventspinch eventswipe event More text here. diff --git a/tests/data/examples/test_membergroups/compare.xml b/tests/data/examples/test_membergroups/compare.xml index 4227d3ae..f03f8936 100644 --- a/tests/data/examples/test_membergroups/compare.xml +++ b/tests/data/examples/test_membergroups/compare.xml @@ -3,21 +3,21 @@ - class GroupedMembers + class GroupedMembers demonstrates member groups myGroup - void in_mygroup_oneint myParameter + void in_mygroup_oneint myParameter A function. - void in_mygroup_twoint myParameter + void in_mygroup_twoint myParameter Another function. @@ -27,7 +27,7 @@ Public Functions - void not_in_mygroupint myParameter + void not_in_mygroupint myParameter This one is not in myGroup. diff --git a/tests/data/examples/test_param_dirs/compare.xml b/tests/data/examples/test_param_dirs/compare.xml index 71c57967..dc4310a6 100644 --- a/tests/data/examples/test_param_dirs/compare.xml +++ b/tests/data/examples/test_param_dirs/compare.xml @@ -3,7 +3,7 @@ - int processvoid *ivoid *ovoid *io + int processvoid *ivoid *ovoid *io diff --git a/tests/data/examples/test_python/compare-1.9.6.xml b/tests/data/examples/test_python/compare-1.9.6.xml new file mode 100644 index 00000000..ed39662c --- /dev/null +++ b/tests/data/examples/test_python/compare-1.9.6.xml @@ -0,0 +1,76 @@ + + + + + + module pyexample + + Documentation for this module. + More details. + + Functions + + + func + + Documentation for a function. + More details. + + + + + + class PyClass + + Documentation for a class. + More details. + + Public Functions + + + __init__self + + The constructor. + + + + + PyMethodself + + Documentation for a method. + + + Parameters + + self – The object pointer. + + + + + + + + Public Static Attributes + + + classVar = 0 + + A class variable. + + + + + Protected Attributes + + + _memVar + + a member variable + + + + + + + + diff --git a/tests/data/examples/test_python/compare.xml b/tests/data/examples/test_python/compare.xml index 553c1a81..cfb3cf13 100644 --- a/tests/data/examples/test_python/compare.xml +++ b/tests/data/examples/test_python/compare.xml @@ -3,7 +3,7 @@ - module pyexample + module pyexample Documentation for this module. More details. @@ -11,7 +11,7 @@ Functions - func + func Documentation for a function. More details. @@ -20,7 +20,7 @@ - class PyClass + class PyClass Documentation for a class. More details. @@ -28,14 +28,14 @@ Public Functions - __init__self + __init__self The constructor. - PyMethodself + PyMethodself Documentation for a method. @@ -53,17 +53,17 @@ Public Static Attributes - classVar = 0 + classVar = 0 A class variable. 
- - Protected Attributes + + Private Members - _memVar + _memVar a member variable diff --git a/tests/data/examples/test_qtsignalsandslots/compare.xml b/tests/data/examples/test_qtsignalsandslots/compare.xml index def81917..c46a4ff0 100644 --- a/tests/data/examples/test_qtsignalsandslots/compare.xml +++ b/tests/data/examples/test_qtsignalsandslots/compare.xml @@ -3,13 +3,13 @@ - class QtSignalSlotExample : public QObject + class QtSignalSlotExample : public QObject Public Functions - inline void workingFunctionint iShownParameter + inline void workingFunctionint iShownParameter @@ -26,7 +26,7 @@ Public Slots - inline void workingSlotint iShown + inline void workingSlotint iShown @@ -43,7 +43,7 @@ Signals - void workingSignalint iShown + void workingSignalint iShown diff --git a/tests/data/examples/test_rst/compare.xml b/tests/data/examples/test_rst/compare.xml index d05d0d8e..4084c728 100644 --- a/tests/data/examples/test_rst/compare.xml +++ b/tests/data/examples/test_rst/compare.xml @@ -3,14 +3,14 @@ - class TestClass + class TestClass first class inside of namespace Public Functions - virtual void function const = 0 + virtual void function const = 0 Inserting additional reStructuredText information. This is some funky non-XML compliant text: <& !>< @@ -24,7 +24,7 @@ - virtual void rawVerbatim const = 0 + virtual void rawVerbatim const = 0 Inserting additional reStructuredText information. @@ -34,7 +34,7 @@ - virtual void rawLeadingAsteriskVerbatim const = 0 + virtual void rawLeadingAsteriskVerbatim const = 0 Inserting additional reStructuredText information. Some example code:int example(int x) { @@ -44,7 +44,7 @@ - virtual void rawLeadingSlashesVerbatimint something const = 0 + virtual void rawLeadingSlashesVerbatimint something const = 0 Some kind of method. bool foo(bool something) { @@ -65,14 +65,14 @@ - virtual void rawInlineVerbatim const = 0 + virtual void rawInlineVerbatim const = 0 Inserting an inline reStructuredText snippet. Linking to another function: TestClass::rawVerbatim() - inline virtual void testFunction const + inline virtual void testFunction const Brief description. diff --git a/tests/data/examples/test_simplesect/compare.xml b/tests/data/examples/test_simplesect/compare.xml index b0da1155..82c32954 100644 --- a/tests/data/examples/test_simplesect/compare.xml +++ b/tests/data/examples/test_simplesect/compare.xml @@ -3,7 +3,7 @@ - template<typename T1, typename T2>void fint afloat bstd::string c + template<typename T1, typename T2>void fint afloat bstd::string c see, f_raw sa, f_raw Remarkremark, 1 Remarkremark, 2 Remarkremarks, 1 Remarkremarks, 2 par, something diff --git a/tests/data/examples/test_tables/compare.xml b/tests/data/examples/test_tables/compare.xml index a883b4d7..b1579520 100644 --- a/tests/data/examples/test_tables/compare.xml +++ b/tests/data/examples/test_tables/compare.xml @@ -3,7 +3,7 @@ - class Table_1 + class Table_1 This is a simple Markdown table example. Following is a simple table using Markdown syntax. @@ -13,7 +13,7 @@ - class Table_2 + class Table_2 This is a Markdown table with alignment. Following is a table with alignment using Markdown syntax. @@ -23,7 +23,7 @@ - class Table_3 + class Table_3 This is a Markdown table with rowspan and alignment. Following is a table with rowspan and alignment using Markdown syntax. @@ -33,7 +33,7 @@ - class Table_4 + class Table_4 This is a Markdown table with colspan and alignment. Following is a table with colspan and alignment using Markdown syntax. 
@@ -43,7 +43,7 @@ - class Table_5 + class Table_5 This is a Doxygen table. Following is a table using Doxygen syntax (and all supported features). diff --git a/tests/data/examples/test_template_class_non_type/compare.xml b/tests/data/examples/test_template_class_non_type/compare.xml index fae79d38..b6ddbf9e 100644 --- a/tests/data/examples/test_template_class_non_type/compare.xml +++ b/tests/data/examples/test_template_class_non_type/compare.xml @@ -3,7 +3,7 @@ - template<typename T, typename U, int N>class anothertemplateclass + template<typename T, typename U, int N>class anothertemplateclass a class with three template parameters @@ -28,14 +28,14 @@ Public Functions - inline anothertemplateclass + inline anothertemplateclass default constructor - inline anothertemplateclassT const &m1U const &m2 + inline anothertemplateclassT const &m1U const &m2 constructor with two template argument @@ -57,7 +57,7 @@ - U methodT const &t + U methodT const &t member accepting template argument and returning template argument diff --git a/tests/data/examples/test_template_function/compare.xml b/tests/data/examples/test_template_function/compare.xml index 2f5b7c4f..61d3bf4a 100644 --- a/tests/data/examples/test_template_function/compare.xml +++ b/tests/data/examples/test_template_function/compare.xml @@ -5,7 +5,7 @@ Functions - template<typename T>T function1T arg1 + template<typename T>T function1T arg1 a function with one template arguments @@ -32,7 +32,7 @@ - template<>std::string function1<std::string>std::string arg1 + template<>std::string function1<std::string>std::string arg1 a function with one template argument specialized for std::string @@ -53,7 +53,7 @@ - template<typename T, typename U, int N>T function2T arg1U arg2 + template<typename T, typename U, int N>T function2T arg1U arg2 a function with three template arguments @@ -97,7 +97,7 @@ - template<typename T = void, typename, int>void function3 + template<typename T = void, typename, int>void function3 a function with unnamed arguments and an argument with a default value diff --git a/tests/data/examples/test_template_type_alias/compare.xml b/tests/data/examples/test_template_type_alias/compare.xml index d7c6bb36..4bddda78 100644 --- a/tests/data/examples/test_template_type_alias/compare.xml +++ b/tests/data/examples/test_template_type_alias/compare.xml @@ -5,7 +5,7 @@ Typedefs - template<typename T>using IsFuzzy = std::is_fuzzy<T> + template<typename T>using IsFuzzy = std::is_fuzzy<T> a type alias with one template argument @@ -20,7 +20,7 @@ - template<typename T, typename U, int N>using IsFurry = std::is_furry<T, U, N> + template<typename T, typename U, int N>using IsFurry = std::is_furry<T, U, N> a type alias with three template arguments diff --git a/tests/data/examples/test_union/compare.xml b/tests/data/examples/test_union/compare.xml index 15950980..6ab6e049 100644 --- a/tests/data/examples/test_union/compare.xml +++ b/tests/data/examples/test_union/compare.xml @@ -3,21 +3,21 @@ - union SeparateUnion + union SeparateUnion A union of two values. Public Members - int size + int size The size of the thing. - float depth + float depth How deep it is. @@ -27,25 +27,25 @@ - namespace foo + namespace foo - union MyUnion + union MyUnion A union of two values. Public Members - int someInt + int someInt The int of it all. - float someFloat + float someFloat The float side of things. 
diff --git a/tests/data/examples/test_userdefined/compare.xml b/tests/data/examples/test_userdefined/compare.xml index 008da03e..5e66027e 100644 --- a/tests/data/examples/test_userdefined/compare.xml +++ b/tests/data/examples/test_userdefined/compare.xml @@ -3,7 +3,7 @@ - class UserDefinedGroupTest + class UserDefinedGroupTest A class. More details about the UserDefinedGroupTest class @@ -12,7 +12,7 @@ Description of custom group - void func1InCustomGroup + void func1InCustomGroup Function 1 in custom group. Details. @@ -20,7 +20,7 @@ - void func2InCustomGroup + void func2InCustomGroup Function 2 in custom group. Details. @@ -31,7 +31,7 @@ Public Functions - void func1InGroup1 + void func1InGroup1 Same documentation for both members. Details @@ -39,7 +39,7 @@ - void ungroupedFunction + void ungroupedFunction Function without group. Details. diff --git a/tests/data/examples/test_xrefsect/compare.xml b/tests/data/examples/test_xrefsect/compare.xml index 3e6a3e51..af7b2b2f 100644 --- a/tests/data/examples/test_xrefsect/compare.xml +++ b/tests/data/examples/test_xrefsect/compare.xml @@ -6,7 +6,7 @@ Functions - int unimplementedvoid + int unimplementedvoid An example of using Doxygen’s todo command. Todo:Implement this function. @@ -14,7 +14,7 @@ - void buggy_functionint param + void buggy_functionint param An example of using Doxygen’s bug and test commands. Bug:Does not work yet. @@ -23,7 +23,7 @@ - void old_functionvoid + void old_functionvoid An example of using Doxygen’s deprecated command. Deprecated:Should not be used on new code. @@ -31,7 +31,7 @@ - void sample_xrefitem_functionvoid + void sample_xrefitem_functionvoid An example of a custom Doxygen xrefitem declared as an ALIAS. xref Sample:This text shows up in the xref output. diff --git a/tests/test_examples.py b/tests/test_examples.py index afcbc346..fc7c72cb 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from xml.parsers import expat import pytest import pathlib @@ -5,6 +7,12 @@ import shutil import enum import dataclasses +import sphinx + +if sphinx.version_info < (7, 2, 6): + from sphinx.testing.path import path as sphinx_path +else: + sphinx_path = pathlib.Path DOXYFILE_TEMPLATE = """ @@ -15,6 +23,7 @@ GENERATE_RTF = NO CASE_SENSE_NAMES = NO OUTPUT_DIRECTORY = "{output}" +IMAGE_PATH = "." QUIET = YES JAVADOC_AUTOBRIEF = YES GENERATE_HTML = NO @@ -159,7 +168,42 @@ def conf_overrides(extra): return conf -def compare_xml(generated, model): +def str_to_set(x): + return frozenset(x.split()) + + +def attr_compare(name, a, b): + if name == "classes": + return str_to_set(a) == str_to_set(b) + + return a == b + + +@dataclasses.dataclass +class VersionedFile: + file: str + version: tuple[int, ...] 
+ + +def str_to_version(v_str): + return tuple(map(int, v_str.strip().split("."))) + + +def versioned_model(p): + fname = str(p) + return VersionedFile(fname, str_to_version(fname[len("compare-") : -len(".xml")])) + + +def compare_xml(generated, version): + alt_models = list(map(versioned_model, pathlib.Path(".").glob("compare-*.xml"))) + alt_models.sort(key=(lambda f: f.version), reverse=True) + + model = "compare.xml" + for alt_m in alt_models: + if version >= alt_m.version: + model = alt_m.file + break + event_str = { XMLEventType.E_START: "element start", XMLEventType.E_END: "element end", @@ -183,8 +227,8 @@ def compare_xml(generated, model): for key, value in c_node.attr.items(): assert key in o_node.attr, f"missing attribute at line {o_node.line_no}: {key}" o_value = o_node.attr[key] - assert ( - o_value == value + assert attr_compare( + key, o_value, value ), f'wrong value for attribute "{key}" at line {o_node.line_no}: expected "{value}", found "{o_value}"' elif o_type == XMLEventType.E_TEXT: assert ( @@ -192,50 +236,58 @@ def compare_xml(generated, model): ), f'wrong content at line {o_node.line_no}: expected "{c_node.value}", found "{o_node.value}"' +@pytest.fixture(scope="module") +def doxygen(): + exc = shutil.which("doxygen") + if exc is None: + raise ValueError("cannot find doxygen executable") + + r = subprocess.run( + [exc, "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True + ) + return VersionedFile(exc, str_to_version(r.stdout.split()[0])) + + @pytest.mark.parametrize("test_input", get_individual_tests()) -def test_example(make_app, tmp_path, test_input, monkeypatch): +def test_example(make_app, tmp_path, test_input, monkeypatch, doxygen): monkeypatch.chdir(test_input) - doxygen = shutil.which("doxygen") - if doxygen is None: - raise ValueError("cannot find doxygen executable") - doxyfile = tmp_path / "Doxyfile" doxycontent = DOXYFILE_TEMPLATE.format(output=tmp_path) - extra_opts = test_input / "extra_dox_opts.txt" + extra_opts = pathlib.Path("extra_dox_opts.txt") if extra_opts.exists(): doxycontent += extra_opts.read_text() doxyfile.write_text(doxycontent) (tmp_path / "conf.py").touch() - shutil.copyfile(test_input / "input.rst", tmp_path / "index.rst") + shutil.copyfile("input.rst", tmp_path / "index.rst") - subprocess.run([doxygen, doxyfile], check=True) + subprocess.run([doxygen.file, doxyfile], check=True) make_app( buildername="xml", - srcdir=tmp_path, + srcdir=sphinx_path(tmp_path), confoverrides=conf_overrides({"breathe_projects": {"example": str(tmp_path / "xml")}}), ).build() - compare_xml(tmp_path / "_build" / "xml" / "index.xml", test_input / "compare.xml") + compare_xml(tmp_path / "_build" / "xml" / "index.xml", doxygen.version) -def test_auto(make_app, tmp_path, monkeypatch): +def test_auto(make_app, tmp_path, monkeypatch, doxygen): test_input = TEST_DATA_DIR / "auto" monkeypatch.chdir(test_input) (tmp_path / "conf.py").touch() - shutil.copyfile(test_input / "input.rst", tmp_path / "index.rst") + shutil.copyfile("input.rst", tmp_path / "index.rst") make_app( buildername="xml", - srcdir=tmp_path, + srcdir=sphinx_path(tmp_path), confoverrides=conf_overrides( { "breathe_projects_source": { - "example": (test_input, ["auto_class.h", "auto_function.h"]) + "example": (str(test_input.absolute()), ["auto_class.h", "auto_function.h"]) } } ), ).build() - compare_xml(tmp_path / "_build" / "xml" / "index.xml", test_input / "compare.xml") + compare_xml(tmp_path / "_build" / "xml" / "index.xml", doxygen.version) diff --git 
a/xml_parser_generator/module_template.c.in b/xml_parser_generator/module_template.c.in index 0e24d605..a5b59beb 100644 --- a/xml_parser_generator/module_template.c.in +++ b/xml_parser_generator/module_template.c.in @@ -2,8 +2,7 @@ #include #ifdef PARSER_PY_LIMITED_API -/* Py_LIMITED_API isn't compatible with Py_TRACE_REFS */ -# if !defined(Py_TRACE_REFS) +# if !defined(Py_TRACE_REFS) && !defined(Py_DEBUG) && !defined(Py_REF_DEBUG) # define Py_LIMITED_API PARSER_PY_LIMITED_API # endif # define PARSER_PY_VERSION_HEX PARSER_PY_LIMITED_API From 925f1a13c0e069c4ce0473b963e4055b2d7e50af Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Thu, 14 Dec 2023 21:04:08 -0500 Subject: [PATCH 42/65] Compatibility fixes --- .github/workflows/unit_tests.yml | 4 ++++ breathe/renderer/__init__.py | 12 +++++++++--- tests/test_renderer.py | 5 ++++- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 93d844df..8a6a55cb 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -23,6 +23,10 @@ jobs: - '6.1.3' - git+https://github.com/sphinx-doc/sphinx.git@master exclude: + # the git version isn't compatible with Python 3.8 + - python-version: '3.8' + sphinx-version: git+https://github.com/sphinx-doc/sphinx.git@master + # avoid bug in following configurations # sphinx/util/typing.py:37: in # from types import Union as types_Union diff --git a/breathe/renderer/__init__.py b/breathe/renderer/__init__.py index 9faacee3..4f24ed45 100644 --- a/breathe/renderer/__init__.py +++ b/breathe/renderer/__init__.py @@ -49,9 +49,15 @@ def format_parser_error( ] -class TaggedNode(NamedTuple, Generic[T_data_object]): - tag: str | None - value: T_data_object +if TYPE_CHECKING: + class TaggedNode(NamedTuple, Generic[T_data_object]): + tag: str | None + value: T_data_object +else: + # Python 3.9 and 3.10 don't allow multiple-inheritance with NamedTuple + class TaggedNode(NamedTuple): + tag: str | None + value: ... 
class RenderContext: diff --git a/tests/test_renderer.py b/tests/test_renderer.py index b91ad5c1..92b5f332 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -36,7 +36,10 @@ def __init__(self, app): env.temp_data["docname"] = "mock-doc" env.temp_data["breathe_project_info_factory"] = ProjectInfoFactory(app) env.temp_data["breathe_parser_factory"] = DoxygenParserFactory(app) - settings = frontend.get_default_settings(docutils.parsers.rst.Parser) + if hasattr(frontend, "get_default_settings"): + settings = frontend.get_default_settings(docutils.parsers.rst.Parser) + else: + settings = frontend.OptionParser(components=(docutils.parsers.rst.Parser,)).get_default_values() settings.env = env self.document = utils.new_document("", settings) From a6e1135c0eca435fcb5b31338b801d1692e242d7 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Fri, 15 Dec 2023 01:33:12 -0500 Subject: [PATCH 43/65] A fix, new tests and linter/typing conformance --- breathe/filetypes.py | 2 +- breathe/renderer/__init__.py | 2 + breathe/renderer/sphinxrenderer.py | 309 +++++++++--------- setup.py | 1 + tests/data/examples/test_cpp_anon/compare.xml | 53 +++ tests/data/examples/test_cpp_anon/cpp_anon.h | 13 + tests/data/examples/test_cpp_anon/input.rst | 1 + tests/data/examples/test_diagrams/compare.xml | 183 +++++++++++ .../data/examples/test_diagrams/diagrams_a.h | 4 + .../data/examples/test_diagrams/diagrams_b.h | 5 + .../data/examples/test_diagrams/diagrams_c.h | 6 + .../data/examples/test_diagrams/diagrams_d.h | 7 + .../data/examples/test_diagrams/diagrams_e.h | 5 + .../examples/test_diagrams/extra_dox_opts.txt | 1 + tests/data/examples/test_diagrams/input.rst | 2 + tests/test_examples.py | 6 +- tests/test_renderer.py | 30 +- xml_parser_generator/make_parser.py | 1 + 18 files changed, 467 insertions(+), 164 deletions(-) create mode 100644 tests/data/examples/test_cpp_anon/compare.xml create mode 100644 tests/data/examples/test_cpp_anon/cpp_anon.h create mode 100644 tests/data/examples/test_cpp_anon/input.rst create mode 100644 tests/data/examples/test_diagrams/compare.xml create mode 100644 tests/data/examples/test_diagrams/diagrams_a.h create mode 100644 tests/data/examples/test_diagrams/diagrams_b.h create mode 100644 tests/data/examples/test_diagrams/diagrams_c.h create mode 100644 tests/data/examples/test_diagrams/diagrams_d.h create mode 100644 tests/data/examples/test_diagrams/diagrams_e.h create mode 100644 tests/data/examples/test_diagrams/extra_dox_opts.txt create mode 100644 tests/data/examples/test_diagrams/input.rst diff --git a/breathe/filetypes.py b/breathe/filetypes.py index 803b77fc..aff69548 100644 --- a/breathe/filetypes.py +++ b/breathe/filetypes.py @@ -14,7 +14,7 @@ def get_pygments_alias(filename: str) -> str | None: "Find first pygments alias from filename" try: lexer_cls = get_lexer_for_filename(filename) - return lexer_cls.aliases[0] + return lexer_cls.aliases[0] # type: ignore except ClassNotFound: return None diff --git a/breathe/renderer/__init__.py b/breathe/renderer/__init__.py index 4f24ed45..d0d6ccff 100644 --- a/breathe/renderer/__init__.py +++ b/breathe/renderer/__init__.py @@ -50,9 +50,11 @@ def format_parser_error( if TYPE_CHECKING: + class TaggedNode(NamedTuple, Generic[T_data_object]): tag: str | None value: T_data_object + else: # Python 3.9 and 3.10 don't allow multiple-inheritance with NamedTuple class TaggedNode(NamedTuple): diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index e399fb3d..515fca94 100644 --- 
a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -58,7 +58,6 @@ from sphinx.application import Sphinx from sphinx.directives import ObjectDescription - from docutils.nodes import Node, Element, TextElement class HasRefID(Protocol): @property @@ -279,7 +278,7 @@ class CSharpXRefRole(BaseObject, cs.CSharpXRefRole): class DomainDirectiveFactory: # A mapping from node kinds to domain directives and their names. - cpp_classes = { + cpp_classes: dict[str, tuple[Type[ObjectDescription], str]] = { "variable": (CPPMemberObject, "var"), "class": (CPPClassObject, "class"), "struct": (CPPClassObject, "struct"), @@ -298,7 +297,7 @@ class DomainDirectiveFactory: "enumvalue": (CPPEnumeratorObject, "enumerator"), "define": (CMacroObject, "macro"), } - c_classes = { + c_classes: dict[str, tuple[Type[ObjectDescription], str]] = { "variable": (CMemberObject, "var"), "function": (CFunctionObject, "function"), "define": (CMacroObject, "macro"), @@ -308,7 +307,7 @@ class DomainDirectiveFactory: "enumvalue": (CEnumeratorObject, "enumerator"), "typedef": (CTypeObject, "type"), } - python_classes = { + python_classes: dict[str, tuple[Type[ObjectDescription], str]] = { # TODO: PyFunction is meant for module-level functions # and PyAttribute is meant for class attributes, not module-level variables. # Somehow there should be made a distinction at some point to get the correct @@ -319,6 +318,7 @@ class DomainDirectiveFactory: "namespace": (PyClasslike, "class"), } + php_classes: dict[str, tuple[Type[ObjectDescription], str]] if php is not None: php_classes = { "function": (PHPNamespaceLevel, "function"), @@ -329,6 +329,7 @@ class DomainDirectiveFactory: } php_classes_default = php_classes["class"] # Directive when no matching ones were found + cs_classes: dict[str, tuple[Type[ObjectDescription], str]] if cs is not None: cs_classes = { # 'doxygen-name': (CSharp class, key in CSharpDomain.object_types) @@ -369,9 +370,9 @@ def create(domain: str, args) -> ObjectDescription: arg_0 = "global" if arg_0 in DomainDirectiveFactory.php_classes: - cls, name = DomainDirectiveFactory.php_classes[arg_0] # type: ignore + cls, name = DomainDirectiveFactory.php_classes[arg_0] else: - cls, name = DomainDirectiveFactory.php_classes_default # type: ignore + cls, name = DomainDirectiveFactory.php_classes_default elif cs is not None and domain == "cs": cls, name = DomainDirectiveFactory.cs_classes[args[0]] @@ -553,11 +554,11 @@ class NodeHandler(Generic[T]): """Dummy callable that associates a set of nodes to a function. This gets unwrapped by NodeVisitor and is never actually called.""" - def __init__(self, handler: Callable[[SphinxRenderer, T], list[Node]]): + def __init__(self, handler: Callable[[SphinxRenderer, T], list[nodes.Node]]): self.handler = handler self.nodes: set[type[parser.NodeOrValue]] = set() - def __call__(self, r: SphinxRenderer, node: T, /) -> list[Node]: # pragma: no cover + def __call__(self, r: SphinxRenderer, node: T, /) -> list[nodes.Node]: # pragma: no cover raise TypeError() @@ -565,18 +566,20 @@ class TaggedNodeHandler(Generic[T]): """Dummy callable that associates a set of nodes to a function. 
This gets unwrapped by NodeVisitor and is never actually called.""" - def __init__(self, handler: Callable[[SphinxRenderer, str, T], list[Node]]): + def __init__(self, handler: Callable[[SphinxRenderer, str, T], list[nodes.Node]]): self.handler = handler self.nodes: set[type[parser.NodeOrValue]] = set() - def __call__(self, r: SphinxRenderer, tag: str, node: T, /) -> list[Node]: # pragma: no cover + def __call__( + self, r: SphinxRenderer, tag: str, node: T, / + ) -> list[nodes.Node]: # pragma: no cover raise TypeError() def node_handler(node: type[parser.NodeOrValue]): def inner( - f: Callable[[SphinxRenderer, T], list[Node]] - ) -> Callable[[SphinxRenderer, T], list[Node]]: + f: Callable[[SphinxRenderer, T], list[nodes.Node]] + ) -> Callable[[SphinxRenderer, T], list[nodes.Node]]: handler: NodeHandler = f if isinstance(f, NodeHandler) else NodeHandler(f) handler.nodes.add(node) return handler @@ -586,8 +589,8 @@ def inner( def tagged_node_handler(node: type[parser.NodeOrValue]): def inner( - f: Callable[[SphinxRenderer, str, T], list[Node]] - ) -> Callable[[SphinxRenderer, str, T], list[Node]]: + f: Callable[[SphinxRenderer, str, T], list[nodes.Node]] + ) -> Callable[[SphinxRenderer, str, T], list[nodes.Node]]: handler: TaggedNodeHandler = f if isinstance(f, TaggedNodeHandler) else TaggedNodeHandler(f) handler.nodes.add(node) return handler @@ -640,12 +643,15 @@ class SphinxRenderer(metaclass=NodeVisitor): """ node_handlers: ClassVar[ - dict[type[parser.NodeOrValue], Callable[[SphinxRenderer, parser.NodeOrValue], list[Node]]] + dict[ + type[parser.NodeOrValue], + Callable[[SphinxRenderer, parser.NodeOrValue], list[nodes.Node]], + ] ] tagged_node_handlers: ClassVar[ dict[ type[parser.NodeOrValue], - Callable[[SphinxRenderer, str, parser.NodeOrValue], list[Node]], + Callable[[SphinxRenderer, str, parser.NodeOrValue], list[nodes.Node]], ] ] @@ -754,7 +760,7 @@ def join_nested_name(self, names: list[str]) -> str: def run_directive( self, obj_type: str, declaration: str, contentCallback: ContentCallback, options={} - ) -> list[Node]: + ) -> list[nodes.Node]: assert self.context is not None args = [obj_type, [declaration]] + self.context.directive_args[2:] directive = DomainDirectiveFactory.create(self.context.domain, args) @@ -808,7 +814,7 @@ def run_directive( signode = finder.declarator if self.context.child: - signode.children = [n for n in signode.children if not n.tagname == "desc_addname"] + signode.children = [n for n in signode.children if not n.tagname == "desc_addname"] # type: ignore return nodes def handle_compounddef_declaration( @@ -820,7 +826,7 @@ def handle_compounddef_declaration( new_context, parent_context, display_obj_type: str | None = None, - ) -> list[Node]: + ) -> list[nodes.Node]: def content(contentnode) -> None: if node.includes: for include in node.includes: @@ -844,7 +850,7 @@ def handle_declaration( display_obj_type: str | None = None, declarator_callback: DeclaratorCallback | None = None, options={}, - ) -> list[Node]: + ) -> list[nodes.Node]: if content_callback is None: def content(contentnode: addnodes.desc_content): @@ -1075,7 +1081,7 @@ def create_doxygen_target(self, node): refid = self.get_refid(node.id) return self.target_handler.create_target(refid) - def title(self, node) -> list[Node]: + def title(self, node) -> list[nodes.Node]: nodes_ = [] # Variable type or function return type @@ -1085,7 +1091,7 @@ def title(self, node) -> list[Node]: nodes_.append(addnodes.desc_name(text=node.name)) return nodes_ - def description(self, node: HasDescriptions) -> 
list[Node]: + def description(self, node: HasDescriptions) -> list[nodes.Node]: brief = self.render_optional(node.briefdescription) descr = node.detaileddescription if isinstance(node, parser.Node_memberdefType): @@ -1130,11 +1136,11 @@ def description(self, node: HasDescriptions) -> list[Node]: detailed = self.detaileddescription(descr) return brief + detailed - def detaileddescription(self, descr: parser.Node_descriptionType | None) -> list[Node]: + def detaileddescription(self, descr: parser.Node_descriptionType | None) -> list[nodes.Node]: detailedCand = self.render_optional(descr) # all field_lists must be at the top-level of the desc_content, so pull them up fieldLists: list[nodes.field_list] = [] - admonitions: list[Node] = [] + admonitions: list[nodes.Node] = [] def pullup(node, typ, dest): for n in list(node.findall(typ)): @@ -1148,11 +1154,9 @@ def pullup(node, typ, dest): pullup(candNode, nodes.warning, admonitions) # and collapse paragraphs for para in candNode.findall(nodes.paragraph): - if ( - para.parent - and len(para.parent) == 1 - and isinstance(para.parent, nodes.paragraph) - ): + parent = para.parent + assert parent is None or isinstance(parent, nodes.Element) + if parent and len(parent) == 1 and isinstance(parent, nodes.paragraph): para.replace_self(para.children) # and remove empty top-level paragraphs @@ -1195,7 +1199,7 @@ def pullup(node, typ, dest): bodyNodes.extend(fb[0]) items.append(nodes.paragraph("", "", *bodyNodes)) # only make a bullet list if there are multiple retvals - body: Node + body: nodes.Element if len(items) == 1: body = items[0] else: @@ -1263,8 +1267,8 @@ def render_declaration( return nodes @node_handler(parser.Node_DoxygenTypeIndex) - def visit_doxygen(self, node: parser.Node_DoxygenTypeIndex) -> list[Node]: - nodelist: list[Node] = [] + def visit_doxygen(self, node: parser.Node_DoxygenTypeIndex) -> list[nodes.Node]: + nodelist: list[nodes.Node] = [] # Process all the compound children for n in node.compound: @@ -1272,11 +1276,11 @@ def visit_doxygen(self, node: parser.Node_DoxygenTypeIndex) -> list[Node]: return nodelist @node_handler(parser.Node_DoxygenType) - def visit_doxygendef(self, node: parser.Node_DoxygenType) -> list[Node]: + def visit_doxygendef(self, node: parser.Node_DoxygenType) -> list[nodes.Node]: assert len(node.compounddef) == 1 return self.render(node.compounddef[0]) - def visit_union(self, node: HasRefID) -> list[Node]: + def visit_union(self, node: HasRefID) -> list[nodes.Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) assert len(file_data.compounddef) == 1 @@ -1299,7 +1303,7 @@ def visit_union(self, node: HasRefID) -> list[Node]: ) return nodes - def visit_class(self, node: HasRefID) -> list[Node]: + def visit_class(self, node: HasRefID) -> list[nodes.Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) assert len(file_data.compounddef) == 1 @@ -1367,7 +1371,7 @@ def visit_class(self, node: HasRefID) -> list[Node]: return nodes[1][1].children return nodes - def visit_namespace(self, node: HasRefID) -> list[Node]: + def visit_namespace(self, node: HasRefID) -> list[nodes.Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) assert len(file_data.compounddef) == 1 @@ -1407,11 +1411,11 @@ def visit_compound( get_node_info: Callable[[parser.Node_DoxygenType], tuple[str, parser.DoxCompoundKind]] | None = None, render_signature: Callable[ - [parser.Node_DoxygenType, 
Sequence[Element], str, parser.DoxCompoundKind], - tuple[list[Node], addnodes.desc_content], + [parser.Node_DoxygenType, Sequence[nodes.Element], str, parser.DoxCompoundKind], + tuple[list[nodes.Node], addnodes.desc_content], ] | None = None, - ) -> list[Node]: + ) -> list[nodes.Node]: # Read in the corresponding xml file and process file_data = self.compound_parser.parse(node.refid) assert len(file_data.compounddef) == 1 @@ -1451,7 +1455,7 @@ def def_get_node_info(file_data) -> tuple[str, parser.DoxCompoundKind]: def def_render_signature( file_data: parser.Node_DoxygenType, doxygen_target, name, kind: parser.DoxCompoundKind - ) -> tuple[list[Node], addnodes.desc_content]: + ) -> tuple[list[nodes.Node], addnodes.desc_content]: # Defer to domains specific directive. assert len(file_data.compounddef) == 1 @@ -1512,10 +1516,10 @@ def def_render_signature( contentnode.extend(rendered_data) return nodes - def visit_file(self, node: parser.Node_CompoundType) -> list[Node]: + def visit_file(self, node: parser.Node_CompoundType) -> list[nodes.Node]: def render_signature( file_data, doxygen_target, name, kind - ) -> tuple[list[Node], addnodes.desc_content]: + ) -> tuple[list[nodes.Node], addnodes.desc_content]: assert self.context is not None options = self.context.directive_args[2] @@ -1597,22 +1601,22 @@ def render_signature( def render_iterable( self, iterable: Iterable[parser.NodeOrValue], tag: str | None = None - ) -> list[Node]: - output: list[Node] = [] + ) -> list[nodes.Node]: + output: list[nodes.Node] = [] for entry in iterable: output.extend(self.render(entry, tag=tag)) return output def render_tagged_iterable( self, iterable: Iterable[parser.TaggedValue[str, parser.NodeOrValue] | str] - ) -> list[Node]: - output: list[Node] = [] + ) -> list[nodes.Node]: + output: list[nodes.Node] = [] for entry in iterable: output.extend(self.render_tagged(entry)) return output @node_handler(parser.Node_compounddefType) - def visit_compounddef(self, node: parser.Node_compounddefType) -> list[Node]: + def visit_compounddef(self, node: parser.Node_compounddefType) -> list[nodes.Node]: assert self.context is not None options = self.context.directive_args[2] section_order = None @@ -1621,7 +1625,7 @@ def visit_compounddef(self, node: parser.Node_compounddefType) -> list[Node]: membergroup_order = None if "membergroups" in options: membergroup_order = {sec: i for i, sec in enumerate(options["membergroups"].split(" "))} - nodemap: dict[int, list[Node]] = {} + nodemap: dict[int, list[nodes.Node]] = {} def addnode(kind: str, lam): if section_order is None: @@ -1631,12 +1635,20 @@ def addnode(kind: str, lam): if "members-only" not in options: if "allow-dot-graphs" in options: - addnode("incdepgraph", lambda: self.render_optional(node.incdepgraph)) - addnode("invincdepgraph", lambda: self.render_optional(node.invincdepgraph)) - addnode("inheritancegraph", lambda: self.render_optional(node.inheritancegraph)) + addnode( + "incdepgraph", lambda: self.render_optional(node.incdepgraph, "incdepgraph") + ) + addnode( + "invincdepgraph", + lambda: self.render_optional(node.invincdepgraph, "invincdepgraph"), + ) + addnode( + "inheritancegraph", + lambda: self.render_optional(node.inheritancegraph, "inheritancegraph"), + ) addnode( "collaborationgraph", - lambda: self.render_optional(node.collaborationgraph), + lambda: self.render_optional(node.collaborationgraph, "collaborationgraph"), ) addnode("briefdescription", lambda: self.render_optional(node.briefdescription)) @@ -1660,7 +1672,7 @@ def 
render_derivedcompoundref(node): "derivedcompoundref", lambda: render_derivedcompoundref(node.derivedcompoundref) ) - section_nodelists: dict[str, list[Node]] = {} + section_nodelists: dict[str, list[nodes.Node]] = {} # Get all sub sections for sectiondef in node.sectiondef: @@ -1709,13 +1721,14 @@ def render_derivedcompoundref(node): section_titles = dict(sections) @node_handler(parser.Node_sectiondefType) - def visit_sectiondef(self, node: parser.Node_sectiondefType) -> list[Node]: + def visit_sectiondef(self, node: parser.Node_sectiondefType) -> list[nodes.Node]: assert self.context is not None options = self.context.directive_args[2] node_list = [] node_list.extend(self.render_optional(node.description)) # Get all the memberdef info + member_def: Iterable[parser.Node_memberdefType] if "sort" in options: member_def = sorted(node.memberdef, key=lambda x: x.name) else: @@ -1746,7 +1759,7 @@ def visit_sectiondef(self, node: parser.Node_sectiondefType) -> list[Node]: classes=["breathe-sectiondef-title"], ids=["breathe-section-title-" + idtext], ) - res: list[Node] = [rubric] + res: list[nodes.Node] = [rubric] return res + node_list return [] @@ -1754,8 +1767,8 @@ def visit_sectiondef(self, node: parser.Node_sectiondefType) -> list[Node]: @node_handler(parser.Node_refTextType) def visit_docreftext( self, node: parser.Node_docRefTextType | parser.Node_incType | parser.Node_refTextType - ) -> list[Node]: - nodelist: list[Node] + ) -> list[nodes.Node]: + nodelist: list[nodes.Node] if isinstance(node, parser.Node_incType): nodelist = self.render_iterable(node) @@ -1764,9 +1777,9 @@ def visit_docreftext( # TODO: "para" in compound.xsd is an empty tag; figure out what this # is supposed to do - for name, value in map(parser.tag_name_value, node): - if name == "para": - nodelist.extend(self.render(value)) + # for name, value in map(parser.tag_name_value, node): + # if name == "para": + # nodelist.extend(self.render(value)) refid = self.get_refid(node.refid or "") @@ -1785,7 +1798,7 @@ def visit_docreftext( return nodelist @node_handler(parser.Node_docHeadingType) - def visit_docheading(self, node: parser.Node_docHeadingType) -> list[Node]: + def visit_docheading(self, node: parser.Node_docHeadingType) -> list[nodes.Node]: """Heading renderer. Renders embedded headlines as emphasized text. Different heading levels @@ -1795,7 +1808,7 @@ def visit_docheading(self, node: parser.Node_docHeadingType) -> list[Node]: return [nodes.emphasis("", "", *nodelist)] @node_handler(parser.Node_docParaType) - def visit_docpara(self, node: parser.Node_docParaType) -> list[Node]: + def visit_docpara(self, node: parser.Node_docParaType) -> list[nodes.Node]: """ tags in the Doxygen output tend to contain either text or a single other tag of interest. 
So whilst it looks like we're combined descriptions and program listings and @@ -1813,7 +1826,7 @@ def visit_docpara(self, node: parser.Node_docParaType) -> list[Node]: contentNodeCands.extend(self.render_tagged(item)) # if there are consecutive nodes.Text we should collapse them # and rerender them to ensure the right paragraphifaction - contentNodes: list[Node] = [] + contentNodes: list[nodes.Node] = [] for n in contentNodeCands: if len(contentNodes) != 0 and isinstance(contentNodes[-1], nodes.Text): if isinstance(n, nodes.Text): @@ -1864,7 +1877,7 @@ def visit_docpara(self, node: parser.Node_docParaType) -> list[Node]: visit_docparblock = node_handler(parser.Node_docParBlockType)(render_iterable) @node_handler(parser.Node_docBlockQuoteType) - def visit_docblockquote(self, node: parser.Node_docBlockQuoteType) -> list[Node]: + def visit_docblockquote(self, node: parser.Node_docBlockQuoteType) -> list[nodes.Node]: nodelist = self.render_iterable(node) # catch block quote attributions here; the tag is the only identifier, # and it is nested within a subsequent tag @@ -1876,7 +1889,7 @@ def visit_docblockquote(self, node: parser.Node_docBlockQuoteType) -> list[Node] return [nodes.block_quote("", classes=[], *nodelist)] @node_handler(parser.Node_docImageType) - def visit_docimage(self, node: parser.Node_docImageType) -> list[Node]: + def visit_docimage(self, node: parser.Node_docImageType) -> list[nodes.Node]: """Output docutils image node using name attribute from xml as the uri""" path_to_image = node.name @@ -1889,16 +1902,16 @@ def visit_docimage(self, node: parser.Node_docImageType) -> list[Node]: return [nodes.image("", **options)] @node_handler(parser.Node_docURLLink) - def visit_docurllink(self, node: parser.Node_docURLLink) -> list[Node]: + def visit_docurllink(self, node: parser.Node_docURLLink) -> list[nodes.Node]: """Url Link Renderer""" nodelist = self.render_tagged_iterable(node) return [nodes.reference("", "", refuri=node.url, *nodelist)] @tagged_node_handler(parser.Node_docMarkupType) - def visit_docmarkup(self, tag: str, node: parser.Node_docMarkupType) -> list[Node]: + def visit_docmarkup(self, tag: str, node: parser.Node_docMarkupType) -> list[nodes.Node]: nodelist = self.render_tagged_iterable(node) - creator: Type[TextElement] = nodes.inline + creator: Type[nodes.TextElement] = nodes.inline if tag == "emphasis": creator = nodes.emphasis elif tag == "computeroutput": @@ -1920,7 +1933,7 @@ def visit_docmarkup(self, tag: str, node: parser.Node_docMarkupType) -> list[Nod @node_handler(parser.Node_docSect3Type) def visit_docsectN( self, node: parser.Node_docSect1Type | parser.Node_docSect2Type | parser.Node_docSect3Type - ) -> list[Node]: + ) -> list[nodes.Node]: """ Docutils titles are defined by their level inside the document so the proper structure is only guaranteed by the Doxygen XML. 
@@ -1937,7 +1950,7 @@ def visit_docsectN( return [section] @node_handler(parser.Node_docSimpleSectType) - def visit_docsimplesect(self, node: parser.Node_docSimpleSectType) -> list[Node]: + def visit_docsimplesect(self, node: parser.Node_docSimpleSectType) -> list[nodes.Node]: """Other Type documentation such as Warning, Note, Returns, etc""" # for those that should go into a field list, just render them as that, @@ -1984,8 +1997,8 @@ def visit_docsimplesect(self, node: parser.Node_docSimpleSectType) -> list[Node] visit_doctitle = node_handler(parser.Node_docTitleType)(render_tagged_iterable) @node_handler(parser.Node_docFormulaType) - def visit_docformula(self, node: parser.Node_docFormulaType) -> list[Node]: - nodelist: list[Node] = [] + def visit_docformula(self, node: parser.Node_docFormulaType) -> list[nodes.Node]: + nodelist: list[nodes.Node] = [] for latex in node: docname = self.state.document.settings.env.docname # Strip out the doxygen markup that slips through @@ -2008,8 +2021,8 @@ def visit_docformula(self, node: parser.Node_docFormulaType) -> list[Node]: return nodelist @node_handler(parser.Node_listingType) - def visit_listing(self, node: parser.Node_listingType) -> list[Node]: - nodelist: list[Node] = [] + def visit_listing(self, node: parser.Node_listingType) -> list[nodes.Node]: + nodelist: list[nodes.Node] = [] for i, item in enumerate(node.codeline): # Put new lines between the lines if i: @@ -2027,7 +2040,7 @@ def visit_listing(self, node: parser.Node_listingType) -> list[Node]: return [block] @node_handler(parser.Node_codelineType) - def visit_codeline(self, node: parser.Node_codelineType) -> list[Node]: + def visit_codeline(self, node: parser.Node_codelineType) -> list[nodes.Node]: return self.render_iterable(node.highlight) visit_highlight = node_handler(parser.Node_highlightType)(render_tagged_iterable) @@ -2056,7 +2069,7 @@ def _nested_inline_parse_with_titles(self, content, node) -> str: self.state.memo.title_styles = surrounding_title_styles self.state.memo.section_level = surrounding_section_level - def visit_verbatim(self, node: str) -> list[Node]: + def visit_verbatim(self, node: str) -> list[nodes.Node]: if not node.strip().startswith("embed:rst"): # Remove trailing new lines. 
Purely subjective call from viewing results text = node.rstrip() @@ -2104,7 +2117,7 @@ def visit_verbatim(self, node: str) -> list[Node]: rst.append(line, "") # Parent node for the generated node subtree - rst_node: Node + rst_node: nodes.Node if is_inline: rst_node = nodes.inline() else: @@ -2120,11 +2133,11 @@ def visit_verbatim(self, node: str) -> list[Node]: return [rst_node] @node_handler(parser.Node_incType) - def visit_inc(self, node: parser.Node_incType) -> list[Node]: + def visit_inc(self, node: parser.Node_incType) -> list[nodes.Node]: if not self.app.config.breathe_show_include: return [] - compound_link: list[Node] = [nodes.Text("".join(node))] + compound_link: list[nodes.Node] = [nodes.Text("".join(node))] if node.refid: compound_link = self.visit_docreftext(node) if node.local: @@ -2135,7 +2148,7 @@ def visit_inc(self, node: parser.Node_incType) -> list[Node]: return [nodes.container("", nodes.emphasis("", "", *text))] @node_handler(parser.Node_refType) - def visit_ref(self, node: parser.Node_refType) -> list[Node]: + def visit_ref(self, node: parser.Node_refType) -> list[nodes.Node]: def get_node_info(file_data: parser.Node_DoxygenType): name = "".join(node) name = name.rsplit("::", 1)[-1] @@ -2145,7 +2158,7 @@ def get_node_info(file_data: parser.Node_DoxygenType): return self.visit_compound(node, False, get_node_info=get_node_info) @node_handler(parser.Node_docListItemType) - def visit_doclistitem(self, node: parser.Node_docListItemType) -> list[Node]: + def visit_doclistitem(self, node: parser.Node_docListItemType) -> list[nodes.Node]: """List item renderer. Render all the children depth-first. Upon return expand the children node list into a docutils list-item. """ @@ -2154,11 +2167,11 @@ def visit_doclistitem(self, node: parser.Node_docListItemType) -> list[Node]: numeral_kind = ["arabic", "loweralpha", "lowerroman", "upperalpha", "upperroman"] - def render_unordered(self, children) -> list[Node]: + def render_unordered(self, children) -> list[nodes.Node]: nodelist_list = nodes.bullet_list("", *children) return [nodelist_list] - def render_enumerated(self, children, nesting_level) -> list[Node]: + def render_enumerated(self, children, nesting_level) -> list[nodes.Node]: nodelist_list = nodes.enumerated_list("", *children) idx = nesting_level % len(SphinxRenderer.numeral_kind) nodelist_list["enumtype"] = SphinxRenderer.numeral_kind[idx] @@ -2167,7 +2180,7 @@ def render_enumerated(self, children, nesting_level) -> list[Node]: return [nodelist_list] @tagged_node_handler(parser.Node_docListType) - def visit_doclist(self, tag: str, node: parser.Node_docListType) -> list[Node]: + def visit_doclist(self, tag: str, node: parser.Node_docListType) -> list[nodes.Node]: """List renderer The specifics of the actual list rendering are handled by the @@ -2185,8 +2198,8 @@ def visit_doclist(self, tag: str, node: parser.Node_docListType) -> list[Node]: return [] @node_handler(parser.Node_compoundRefType) - def visit_compoundref(self, node: parser.Node_compoundRefType) -> list[Node]: - nodelist: list[Node] = self.render_iterable(node) + def visit_compoundref(self, node: parser.Node_compoundRefType) -> list[nodes.Node]: + nodelist: list[nodes.Node] = self.render_iterable(node) refid = None if node.refid is not None: refid = self.get_refid(node.refid) @@ -2206,7 +2219,7 @@ def visit_compoundref(self, node: parser.Node_compoundRefType) -> list[Node]: return nodelist @node_handler(parser.Node_docXRefSectType) - def visit_docxrefsect(self, node: parser.Node_docXRefSectType) -> list[Node]: + 
def visit_docxrefsect(self, node: parser.Node_docXRefSectType) -> list[nodes.Node]: assert self.app.env is not None signode = addnodes.desc_signature() @@ -2236,8 +2249,8 @@ def visit_docxrefsect(self, node: parser.Node_docXRefSectType) -> list[Node]: return [descnode] @node_handler(parser.Node_docVariableListType) - def visit_docvariablelist(self, node: parser.Node_docVariableListType) -> list[Node]: - output: list[Node] = [] + def visit_docvariablelist(self, node: parser.Node_docVariableListType) -> list[nodes.Node]: + output: list[nodes.Node] = [] for n in node: descnode = addnodes.desc() descnode["objtype"] = "varentry" @@ -2252,15 +2265,15 @@ def visit_docvariablelist(self, node: parser.Node_docVariableListType) -> list[N return output @node_handler(parser.Node_docVarListEntryType) - def visit_docvarlistentry(self, node: parser.Node_docVarListEntryType) -> list[Node]: + def visit_docvarlistentry(self, node: parser.Node_docVarListEntryType) -> list[nodes.Node]: return self.render_tagged_iterable(node.term) @node_handler(parser.Node_docAnchorType) - def visit_docanchor(self, node: parser.Node_docAnchorType) -> list[Node]: + def visit_docanchor(self, node: parser.Node_docAnchorType) -> list[nodes.Node]: return list(self.create_doxygen_target(node)) @node_handler(parser.Node_docEntryType) - def visit_docentry(self, node: parser.Node_docEntryType) -> list[Node]: + def visit_docentry(self, node: parser.Node_docEntryType) -> list[nodes.Node]: col = nodes.entry() col += self.render_iterable(node.para) if node.thead: @@ -2272,11 +2285,11 @@ def visit_docentry(self, node: parser.Node_docEntryType) -> list[Node]: return [col] @node_handler(parser.Node_docRowType) - def visit_docrow(self, node: parser.Node_docRowType) -> list[Node]: + def visit_docrow(self, node: parser.Node_docRowType) -> list[nodes.Node]: row = nodes.row() cols = self.render_iterable(node.entry) elem: Union[nodes.thead, nodes.tbody] - if all(col.get("heading", False) for col in cols): + if all(cast(nodes.Element, col).get("heading", False) for col in cols): elem = nodes.thead() else: elem = nodes.tbody() @@ -2285,7 +2298,7 @@ def visit_docrow(self, node: parser.Node_docRowType) -> list[Node]: return [elem] @node_handler(parser.Node_docTableType) - def visit_doctable(self, node: parser.Node_docTableType) -> list[Node]: + def visit_doctable(self, node: parser.Node_docTableType) -> list[nodes.Node]: table = nodes.table() table["classes"] += ["colwidths-auto"] tgroup = nodes.tgroup(cols=node.cols) @@ -2300,9 +2313,10 @@ def visit_doctable(self, node: parser.Node_docTableType) -> list[Node]: # "envelop" rows there, namely thead and tbody (eg it will need to be updated # if Doxygen one day adds support for tfoot) - tags: dict[str, list] = {row.starttag(): [] for row in rows} + tags: dict[str, list] = {} for row in rows: - tags[row.starttag()].append(row.next_node()) + assert isinstance(row, nodes.Element) + tags[row.starttag()] = [row.next_node()] def merge_row_types(root, elem, elems): for node in elems: @@ -2320,7 +2334,7 @@ def merge_row_types(root, elem, elems): visit_linkedtext = node_handler(parser.Node_linkedTextType)(render_tagged_iterable) - def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: + def visit_function(self, node: parser.Node_memberdefType) -> list[nodes.Node]: dom = self.get_domain() if not dom or dom in ("c", "cpp", "py", "cs"): names = self.get_qualification() @@ -2332,7 +2346,7 @@ def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: declaration = " ".join( [ 
self.create_template_prefix(node), - "".join(cast(str, n.astext()) for n in self.render(node.type)), + "".join(n.astext() for n in self.render(node.type)), name, node.argsstring or "", ] @@ -2364,8 +2378,7 @@ def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: elements.append(name) elements.append(node.argsstring or "") declaration = " ".join(elements) - nodes = self.handle_declaration(node, node.kind.value, declaration) - return nodes + return self.handle_declaration(node, node.kind.value, declaration) else: # Get full function signature for the domain directive. param_list = [] @@ -2400,7 +2413,7 @@ def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: assert self.context is not None self.context.directive_args[1] = [signature] - nodes = self.run_domain_directive(node.kind, self.context.directive_args[1]) + nodes_ = self.run_domain_directive(node.kind, self.context.directive_args[1]) assert self.app.env is not None target = None @@ -2413,7 +2426,8 @@ def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: "{}Doxygen target (old): {}".format(" " * _debug_indent, target[0]["ids"]) ) - rst_node = nodes[1] + rst_node = nodes_[1] + assert isinstance(rst_node, nodes.Element) doc = rst_node.document assert doc is not None finder = NodeFinder(doc) @@ -2428,9 +2442,9 @@ def visit_function(self, node: parser.Node_memberdefType) -> list[Node]: rst_node.children[0].insert(0, target) finder.content.extend(self.description(node)) - return nodes + return nodes_ - def visit_define(self, node: parser.Node_memberdefType) -> list[Node]: + def visit_define(self, node: parser.Node_memberdefType) -> list[nodes.Node]: declaration = node.name if node.param: declaration += "(" @@ -2451,7 +2465,7 @@ def add_definition(declarator: Declarator) -> None: node, node.kind.value, declaration, declarator_callback=add_definition ) - def visit_enum(self, node: parser.Node_memberdefType) -> list[Node]: + def visit_enum(self, node: parser.Node_memberdefType) -> list[nodes.Node]: def content(contentnode): contentnode.extend(self.description(node)) values = nodes.emphasis("", nodes.Text("Values:")) @@ -2478,7 +2492,7 @@ def content(contentnode): return self.handle_declaration(node, obj_type, declaration, content_callback=content) @node_handler(parser.Node_enumvalueType) - def visit_enumvalue(self, node: parser.Node_enumvalueType) -> list[Node]: + def visit_enumvalue(self, node: parser.Node_enumvalueType) -> list[nodes.Node]: if self.app.config.breathe_show_enumvalue_initializer: declaration = node.name + self.make_initializer(node) else: @@ -2489,7 +2503,7 @@ def content(contentnode: addnodes.desc_content): return self.handle_declaration(node, "enumvalue", declaration, content_callback=content) - def visit_typedef(self, node: parser.Node_memberdefType) -> list[Node]: + def visit_typedef(self, node: parser.Node_memberdefType) -> list[nodes.Node]: type_ = "".join(n.astext() for n in self.render(node.type)) names = self.get_qualification() names.append(node.name) @@ -2510,7 +2524,7 @@ def visit_typedef(self, node: parser.Node_memberdefType) -> list[Node]: def make_initializer(self, node) -> str: initializer = node.initializer - signature: list[Node] = [] + signature: list[nodes.Node] = [] if initializer: render_nodes = self.render(initializer) # Do not append separators for paragraphs. 
@@ -2523,7 +2537,7 @@ def make_initializer(self, node) -> str: signature.extend(render_nodes) return "".join(n.astext() for n in signature) - def visit_variable(self, node: parser.Node_memberdefType) -> list[Node]: + def visit_variable(self, node: parser.Node_memberdefType) -> list[nodes.Node]: names = self.get_qualification() names.append(node.name) name = self.join_nested_name(names) @@ -2573,7 +2587,7 @@ def visit_variable(self, node: parser.Node_memberdefType) -> list[Node]: else: return self.render_declaration(node, declaration) - def visit_friendclass(self, node: parser.Node_memberdefType) -> list[Node]: + def visit_friendclass(self, node: parser.Node_memberdefType) -> list[nodes.Node]: dom = self.get_domain() assert not dom or dom == "cpp" @@ -2600,8 +2614,8 @@ def visit_friendclass(self, node: parser.Node_memberdefType) -> list[Node]: def visit_templateparam( self, node: parser.Node_paramType, *, insertDeclNameByParsing: bool = False - ) -> list[Node]: - nodelist: list[Node] = [] + ) -> list[nodes.Node]: + nodelist: list[nodes.Node] = [] # Parameter type if node.type: @@ -2672,8 +2686,8 @@ def visit_templateparam( return nodelist @node_handler(parser.Node_templateparamlistType) - def visit_templateparamlist(self, node: parser.Node_templateparamlistType) -> list[Node]: - nodelist: list[Node] = [] + def visit_templateparamlist(self, node: parser.Node_templateparamlistType) -> list[nodes.Node]: + nodelist: list[nodes.Node] = [] self.output_defname = False for i, item in enumerate(node.param): if i: @@ -2683,7 +2697,7 @@ def visit_templateparamlist(self, node: parser.Node_templateparamlistType) -> li return nodelist @node_handler(parser.Node_docParamListType) - def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: + def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[nodes.Node]: """Parameter/Exception/TemplateParameter documentation""" # retval support available on Sphinx >= 4.3 @@ -2700,7 +2714,7 @@ def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: for item in node: # TODO: does item.parameternamelist really have more than 1 parametername? 
assert len(item.parameternamelist) <= 1, item.parameternamelist - nameNodes: list[Node] = [] + nameNodes: list[nodes.Node] = [] parameterDirectionNodes = [] if len(item.parameternamelist) != 0: paramNameNodes = item.parameternamelist[0].parametername @@ -2749,7 +2763,7 @@ def visit_docparamlist(self, node: parser.Node_docParamListType) -> list[Node]: return [fieldList] @node_handler(parser.Node_docDotMscType) - def visit_docdot(self, node: parser.Node_docDotMscType) -> list[Node]: + def visit_docdot(self, node: parser.Node_docDotMscType) -> list[nodes.Node]: """Translate node from doxygen's dot command to sphinx's graphviz directive.""" graph_node = graphviz() str_value = "" @@ -2774,7 +2788,7 @@ def visit_docdot(self, node: parser.Node_docDotMscType) -> list[Node]: return [graph_node] @node_handler(parser.Node_docImageFileType) - def visit_docdotfile(self, node: parser.Node_docImageFileType) -> list[Node]: + def visit_docdotfile(self, node: parser.Node_docImageFileType) -> list[nodes.Node]: """Translate node from doxygen's dotfile command to sphinx's graphviz directive.""" dotcode = "" dot_file_path: str = node.name or "" @@ -2790,7 +2804,7 @@ def visit_docdotfile(self, node: parser.Node_docImageFileType) -> list[Node]: dot_file_path = os.path.abspath(project_path + os.sep + dot_file_path) else: dot_file_path = os.path.abspath( - self.app.confdir + os.sep + project_path + os.sep + dot_file_path + str(self.app.confdir) + os.sep + project_path + os.sep + dot_file_path ) try: with open(dot_file_path, encoding="utf-8") as fp: @@ -2815,7 +2829,7 @@ def visit_docdotfile(self, node: parser.Node_docImageFileType) -> list[Node]: return [graph_node] @tagged_node_handler(parser.Node_graphType) - def visit_docgraph(self, tag: str, node: parser.Node_graphType) -> list[Node]: + def visit_docgraph(self, tag: str, node: parser.Node_graphType) -> list[nodes.Node]: """Create a graph (generated by doxygen - not user-defined) from XML using dot syntax.""" @@ -2830,7 +2844,7 @@ def visit_docgraph(self, tag: str, node: parser.Node_graphType) -> list[Node]: direction = "back" caption = ( "This graph shows which files directly or indirectly " - + f" include {parent.compoundname}:" + + f"include {parent.compoundname}:" ) elif tag == "inheritancegraph": caption = f"Inheritance diagram for {parent.compoundname}:" @@ -2898,12 +2912,12 @@ def visit_docgraph(self, tag: str, node: parser.Node_graphType) -> list[Node]: caption_node = nodes.paragraph("", nodes.Text(caption)) return [caption_node, nodes.figure("", graph_node)] - def visit_unknown(self, node) -> list[Node]: + def visit_unknown(self, node) -> list[nodes.Node]: """Visit a node of unknown type.""" return [] @node_handler(parser.Node_CompoundType) - def dispatch_compound(self, node: parser.Node_CompoundType) -> list[Node]: + def dispatch_compound(self, node: parser.Node_CompoundType) -> list[nodes.Node]: """Dispatch handling of a compound node to a suitable visit method.""" if node.kind in [ parser.CompoundKind.file, @@ -2916,7 +2930,7 @@ def dispatch_compound(self, node: parser.Node_CompoundType) -> list[Node]: return self.visit_compound(node) @node_handler(parser.Node_memberdefType) - def dispatch_memberdef(self, node: parser.Node_memberdefType) -> list[Node]: + def dispatch_memberdef(self, node: parser.Node_memberdefType) -> list[nodes.Node]: """Dispatch handling of a memberdef node to a suitable visit method.""" if node.kind in ( parser.DoxMemberKind.function, @@ -2944,13 +2958,13 @@ def dispatch_memberdef(self, node: parser.Node_memberdefType) -> 
list[Node]: return self.render_declaration(node, update_signature=self.update_signature) @tagged_node_handler(str) - def visit_string(self, tag: str, node: str) -> list[Node]: + def visit_string(self, tag: str, node: str) -> list[nodes.Node]: if tag == "verbatim": return self.visit_verbatim(node) return self.render_string(node) @node_handler(str) - def render_string(self, node: str) -> list[Node]: + def render_string(self, node: str) -> list[nodes.Node]: # Skip any nodes that are pure whitespace # Probably need a better way to do this as currently we're only doing # it skip whitespace between higher-level nodes, but this will also @@ -2979,40 +2993,39 @@ def render_string(self, node: str) -> list[Node]: return [nodes.Text(node)] return [] - def render_tagged(self, item: parser.TaggedValue[str, parser.NodeOrValue] | str) -> list[Node]: + def render_tagged( + self, item: parser.TaggedValue[str, parser.NodeOrValue] | str + ) -> list[nodes.Node]: if isinstance(item, str): return self.render_string(item) - h = self.tagged_node_handlers.get(type(item.value)) - if h is not None: - assert self.context is not None - with WithContext(self, self.context.create_child_context(item.value, item.name)): - if not self.filter_(NodeStack(self.context.node_stack)): - return [] - return h(self, item.name, item.value) - return self.render(item.value) + return self.render(item.value, None, item.name) def render( self, node: parser.NodeOrValue, context: RenderContext | None = None, tag: str | None = None - ) -> list[Node]: + ) -> list[nodes.Node]: if context is None: assert self.context is not None context = self.context.create_child_context(node, tag) with WithContext(self, context): assert self.context is not None - result: list[Node] = [] - if not self.filter_(NodeStack(self.context.node_stack)): - pass - else: - method = self.node_handlers.get(type(node)) - if method is None: - assert type(node) not in self.tagged_node_handlers - method = SphinxRenderer.visit_unknown - result = method(self, node) + result: list[nodes.Node] = [] + if self.filter_(NodeStack(self.context.node_stack)): + tmethod = self.tagged_node_handlers.get(type(node)) + if tmethod is None: + method = self.node_handlers.get(type(node)) + if method is None: + method = SphinxRenderer.visit_unknown + result = method(self, node) + elif tag is None: + assert isinstance(node, str) + result = self.render_string(node) + else: + result = tmethod(self, tag, node) return result - def render_optional(self, node, tag: str | None = None) -> list[Node]: + def render_optional(self, node, tag: str | None = None) -> list[nodes.Node]: """Render a node that can be None.""" - return self.render(node) if node else [] + return self.render(node, None, tag) if node is not None else [] def setup(app: Sphinx) -> None: diff --git a/setup.py b/setup.py index 17da7530..a2743886 100644 --- a/setup.py +++ b/setup.py @@ -4,6 +4,7 @@ from setuptools import setup, find_packages, Extension from setuptools.command.build import build from setuptools.command.build_ext import build_ext + try: from setuptools.dep_util import newer_group except ImportError: diff --git a/tests/data/examples/test_cpp_anon/compare.xml b/tests/data/examples/test_cpp_anon/compare.xml new file mode 100644 index 00000000..989dbe3a --- /dev/null +++ b/tests/data/examples/test_cpp_anon/compare.xml @@ -0,0 +1,53 @@ + + + + + + struct ClassWithAnonEntities + + + Public Types + + + + + Values: + + + enumerator Enumerator + + + + + + + + Public Members + + + int structMember + + + + + + struct 
ClassWithAnonEntities + + + + + + int unionMember + + + + + + union ClassWithAnonEntities + + + + + + + diff --git a/tests/data/examples/test_cpp_anon/cpp_anon.h b/tests/data/examples/test_cpp_anon/cpp_anon.h new file mode 100644 index 00000000..c2d49600 --- /dev/null +++ b/tests/data/examples/test_cpp_anon/cpp_anon.h @@ -0,0 +1,13 @@ +struct ClassWithAnonEntities { + struct { + int structMember; + }; + + union { + int unionMember; + }; + + enum { + Enumerator + }; +}; diff --git a/tests/data/examples/test_cpp_anon/input.rst b/tests/data/examples/test_cpp_anon/input.rst new file mode 100644 index 00000000..d72cbebf --- /dev/null +++ b/tests/data/examples/test_cpp_anon/input.rst @@ -0,0 +1 @@ +.. doxygenfile:: cpp_anon.h diff --git a/tests/data/examples/test_diagrams/compare.xml b/tests/data/examples/test_diagrams/compare.xml new file mode 100644 index 00000000..7a5f66de --- /dev/null +++ b/tests/data/examples/test_diagrams/compare.xml @@ -0,0 +1,183 @@ + + + + + + class A + + Inheritance diagram for A: +
+ Collaboration diagram for A:
+ Subclassed by C, D
+ Public Members
+ A *m_self
+ class B
+ Inheritance diagram for B:
+ Collaboration diagram for B:
+ Subclassed by D
+ Public Members
+ A *m_a
+ class C : public A
+ Inheritance diagram for C:
+ Collaboration diagram for C:
+ Public Members
+ D *m_d
+ class D : protected virtual A, private B
+ Inheritance diagram for D:
+ Collaboration diagram for D:
+ Subclassed by E
+ Public Members
+ C m_c
+ class E : public D
+ Inheritance diagram for E:
+ Collaboration diagram for E:
+ file diagrams_a.h
+ This graph shows which files directly or indirectly include diagrams_a.h:
+ file diagrams_b.h
+ This graph shows which files directly or indirectly include diagrams_b.h:
+ file diagrams_c.h
+ Include dependency graph for diagrams_c.h:
+ This graph shows which files directly or indirectly include diagrams_c.h:
+ file diagrams_d.h
+ Include dependency graph for diagrams_d.h:
+ This graph shows which files directly or indirectly include diagrams_d.h:
+ file diagrams_e.h
+ Include dependency graph for diagrams_e.h:
diff --git a/tests/data/examples/test_diagrams/diagrams_a.h b/tests/data/examples/test_diagrams/diagrams_a.h new file mode 100644 index 00000000..047a8ab5 --- /dev/null +++ b/tests/data/examples/test_diagrams/diagrams_a.h @@ -0,0 +1,4 @@ +#ifndef _DIAGRAMS_A_H +#define _DIAGRAMS_A_H +class A { public: A *m_self; }; +#endif diff --git a/tests/data/examples/test_diagrams/diagrams_b.h b/tests/data/examples/test_diagrams/diagrams_b.h new file mode 100644 index 00000000..5fcd2476 --- /dev/null +++ b/tests/data/examples/test_diagrams/diagrams_b.h @@ -0,0 +1,5 @@ +#ifndef _DIAGRAMS_B_H +#define _DIAGRAMS_B_H +class A; +class B { public: A *m_a; }; +#endif diff --git a/tests/data/examples/test_diagrams/diagrams_c.h b/tests/data/examples/test_diagrams/diagrams_c.h new file mode 100644 index 00000000..e4ec11d0 --- /dev/null +++ b/tests/data/examples/test_diagrams/diagrams_c.h @@ -0,0 +1,6 @@ +#ifndef _DIAGRAMS_C_H +#define _DIAGRAMS_C_H +#include "diagrams_c.h" +class D; +class C : public A { public: D *m_d; }; +#endif diff --git a/tests/data/examples/test_diagrams/diagrams_d.h b/tests/data/examples/test_diagrams/diagrams_d.h new file mode 100644 index 00000000..3e635cec --- /dev/null +++ b/tests/data/examples/test_diagrams/diagrams_d.h @@ -0,0 +1,7 @@ +#ifndef _DIAGRAM_D_H +#define _DIAGRAM_D_H +#include "diagrams_a.h" +#include "diagrams_b.h" +class C; +class D : virtual protected A, private B { public: C m_c; }; +#endif diff --git a/tests/data/examples/test_diagrams/diagrams_e.h b/tests/data/examples/test_diagrams/diagrams_e.h new file mode 100644 index 00000000..52823881 --- /dev/null +++ b/tests/data/examples/test_diagrams/diagrams_e.h @@ -0,0 +1,5 @@ +#ifndef _DIAGRAM_E_H +#define _DIAGRAM_E_H +#include "diagrams_d.h" +class E : public D {}; +#endif diff --git a/tests/data/examples/test_diagrams/extra_dox_opts.txt b/tests/data/examples/test_diagrams/extra_dox_opts.txt new file mode 100644 index 00000000..bccc1125 --- /dev/null +++ b/tests/data/examples/test_diagrams/extra_dox_opts.txt @@ -0,0 +1 @@ +ENABLE_PREPROCESSING = YES diff --git a/tests/data/examples/test_diagrams/input.rst b/tests/data/examples/test_diagrams/input.rst new file mode 100644 index 00000000..1d7e256e --- /dev/null +++ b/tests/data/examples/test_diagrams/input.rst @@ -0,0 +1,2 @@ +.. 
doxygenindex:: + :allow-dot-graphs: diff --git a/tests/test_examples.py b/tests/test_examples.py index fc7c72cb..3bda5cb2 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -9,6 +9,10 @@ import dataclasses import sphinx +from typing import Any + +sphinx_path: Any + if sphinx.version_info < (7, 2, 6): from sphinx.testing.path import path as sphinx_path else: @@ -35,7 +39,7 @@ """ C_FILE_SUFFIXES = frozenset((".h", ".c", ".hpp", ".cpp")) -IGNORED_ELEMENTS = frozenset(()) +IGNORED_ELEMENTS: frozenset[str] = frozenset(()) BUFFER_SIZE = 0x1000 diff --git a/tests/test_renderer.py b/tests/test_renderer.py index 92b5f332..895ff62c 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -39,7 +39,9 @@ def __init__(self, app): if hasattr(frontend, "get_default_settings"): settings = frontend.get_default_settings(docutils.parsers.rst.Parser) else: - settings = frontend.OptionParser(components=(docutils.parsers.rst.Parser,)).get_default_values() + settings = frontend.OptionParser( + components=(docutils.parsers.rst.Parser,) + ).get_default_values() settings.env = env self.document = utils.new_document("", settings) @@ -239,7 +241,7 @@ def test_render_func(app): argsstring="(int)", virt=parser.DoxVirtualKind.non_virtual, param=[parser.Node_paramType(type=parser.Node_linkedTextType(["int"]))], - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext().startswith("void") @@ -266,7 +268,7 @@ def test_render_typedef(app): definition="typedef int foo", type=parser.Node_linkedTextType(["int"]), name="foo", - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "typedef int foo" @@ -278,7 +280,7 @@ def test_render_c_typedef(app): definition="typedef unsigned int bar", type=parser.Node_linkedTextType(["unsigned int"]), name="bar", - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def, domain="c"), "desc_signature") assert signature.astext() == "typedef unsigned int bar" @@ -291,7 +293,7 @@ def test_render_c_function_typedef(app): type=parser.Node_linkedTextType(["void* (*"]), name="voidFuncPtr", argsstring=")(float, int)", - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def, domain="c"), "desc_signature") assert signature.astext().startswith("typedef void *") @@ -312,7 +314,7 @@ def test_render_using_alias(app): definition="using foo = int", type=parser.Node_linkedTextType(["int"]), name="foo", - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "using foo = int" @@ -327,7 +329,7 @@ def test_render_const_func(app): argsstring="() const", virt=parser.DoxVirtualKind.non_virtual, const=True, - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def), "desc_signature") assert "_CPPv2NK1fEv" in signature["ids"] @@ -342,7 +344,7 @@ def test_render_lvalue_func(app): argsstring="() &", virt=parser.DoxVirtualKind.non_virtual, refqual=parser.DoxRefQualifierKind.lvalue, - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext().endswith("&") @@ -357,7 +359,7 @@ def test_render_rvalue_func(app): argsstring="() &&", 
virt=parser.DoxVirtualKind.non_virtual, refqual=parser.DoxRefQualifierKind.rvalue, - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext().endswith("&&") @@ -373,7 +375,7 @@ def test_render_const_lvalue_func(app): virt=parser.DoxVirtualKind.non_virtual, const=True, refqual=parser.DoxRefQualifierKind.lvalue, - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext().endswith("const &") @@ -389,7 +391,7 @@ def test_render_const_rvalue_func(app): virt=parser.DoxVirtualKind.non_virtual, const=True, refqual=parser.DoxRefQualifierKind.rvalue, - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext().endswith("const &&") @@ -402,7 +404,7 @@ def test_render_variable_initializer(app): type=parser.Node_linkedTextType(["const int"]), name="EOF", initializer=parser.Node_linkedTextType(["= -1"]), - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature = find_node(render(app, member_def), "desc_signature") assert signature.astext() == "const int EOF = -1" @@ -413,7 +415,7 @@ def test_render_define_initializer(app): kind=parser.DoxMemberKind.define, name="MAX_LENGTH", initializer=parser.Node_linkedTextType(["100"]), - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature_w_initializer = find_node( render(app, member_def, show_define_initializer=True), "desc_signature" @@ -424,7 +426,7 @@ def test_render_define_initializer(app): kind=parser.DoxMemberKind.define, name="MAX_LENGTH_NO_INITIALIZER", initializer=parser.Node_linkedTextType(["100"]), - **COMMON_ARGS_memberdefType + **COMMON_ARGS_memberdefType, ) signature_wo_initializer = find_node( diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index 616f52a1..e7e75373 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -89,6 +89,7 @@ def content_names(self) -> Iterable[str]: return [] if TYPE_CHECKING: + @property def py_name(self) -> str: raise NotImplementedError From 771ca626d47ae87da1e4b1730e649a2b5186e69d Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Fri, 15 Dec 2023 04:57:46 -0500 Subject: [PATCH 44/65] One small fix --- breathe/renderer/sphinxrenderer.py | 5 +++-- tests/data/examples/test_cpp_anon/compare.xml | 14 +++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index 515fca94..93ad167f 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -1,6 +1,7 @@ from __future__ import annotations import os +from collections import defaultdict import sphinx from breathe import parser, filetypes @@ -2313,10 +2314,10 @@ def visit_doctable(self, node: parser.Node_docTableType) -> list[nodes.Node]: # "envelop" rows there, namely thead and tbody (eg it will need to be updated # if Doxygen one day adds support for tfoot) - tags: dict[str, list] = {} + tags: defaultdict[str, list] = defaultdict(list) for row in rows: assert isinstance(row, nodes.Element) - tags[row.starttag()] = [row.next_node()] + tags[row.starttag()].append(row.next_node()) def merge_row_types(root, elem, elems): for node in elems: diff --git a/tests/data/examples/test_cpp_anon/compare.xml b/tests/data/examples/test_cpp_anon/compare.xml index 
989dbe3a..ac306132 100644 --- a/tests/data/examples/test_cpp_anon/compare.xml +++ b/tests/data/examples/test_cpp_anon/compare.xml @@ -7,12 +7,12 @@ Public Types - + - + enum [anonymous] Values: - + enumerator Enumerator @@ -29,9 +29,9 @@ - + - struct ClassWithAnonEntities + struct ClassWithAnonEntities::[anonymous] [anonymous] @@ -41,9 +41,9 @@ - + - union ClassWithAnonEntities + union ClassWithAnonEntities::[anonymous] [anonymous] From fd22f6b91303dbf6a4434c31fa642b9368017590 Mon Sep 17 00:00:00 2001 From: jce Date: Wed, 20 Dec 2023 12:04:39 +0100 Subject: [PATCH 45/65] Remove unused import --- breathe/finder/compound.py | 1 - 1 file changed, 1 deletion(-) diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index c48cdbd1..6b01cced 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -1,5 +1,4 @@ from breathe.finder import ItemFinder, stack -from breathe.parser.compound import compounddefTypeSub from breathe.renderer.filter import Filter, FilterFactory from breathe.parser import DoxygenCompoundParser From 86815a90f7948af3a8120853b616b85c4efe2fe7 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Wed, 20 Dec 2023 17:52:04 -0500 Subject: [PATCH 46/65] Reorganized setup code and metadata --- MANIFEST.in | 9 ++ pyproject.toml | 47 +++++++ setup.cfg | 3 - setup.py | 144 +-------------------- xml_parser_generator/schema.json | 4 +- xml_parser_generator/setuptools_builder.py | 113 ++++++++++++++++ 6 files changed, 173 insertions(+), 147 deletions(-) create mode 100644 MANIFEST.in create mode 100644 xml_parser_generator/setuptools_builder.py diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..5e971bb6 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,9 @@ +recursive-include xml_parser_generator *.py *.in *.json +include requirements/*.txt tests/*.py +graft tests/data +exclude breathe/_parser.pyi +prune scripts +prune examples +prune documentation +prune requirements +global-exclude *.py[cod] *.so *~ *.gitignore \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 283e1644..13e8b08d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,55 @@ [build-system] requires = ["setuptools", "jinja2", "perfect-hash"] +build-backend = "setuptools.build_meta" + +[project] +name = "breathe" +description = "Sphinx Doxygen renderer" +authors = [{name = "Michael Jones", email = "m.pricejones@gmail.com"}] +license = {file = "LICENSE"} +readme = "README.rst" +classifiers = [ + "Development Status :: 4 - Beta", + "Environment :: Console", + "Environment :: Web Environment", + "Framework :: Sphinx :: Extension", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "License :: OSI Approved :: BSD License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Topic :: Documentation", + "Topic :: Text Processing", + "Topic :: Utilities", +] + +# Keep in sync with breathe/__init__.py __version__ +version = "4.35.0" + +requires-python = ">=3.8" +dependencies = [ + "Sphinx>=4.0,!=5.0.0", + "docutils>=0.12" +] + +[project.urls] +homepage = "https://github.com/michaeljones/breathe" + +[project.scripts] +breathe-apidoc = "breathe.apidoc:main" + +[tool.setuptools.packages.find] +include = ["breathe*"] [tool.black] line-length = 100 extend-exclude = ''' ^/examples/.* | ^/tests/data/.* ''' + +[tool.cibuildwheel] +test-requires = "pytest" +test-command = "pytest {project}/tests" + +[tool.cibuildwheel.linux] +before-all = "yum install -y doxygen" diff --git a/setup.cfg b/setup.cfg index 
6da0f658..d64f4357 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,3 @@ -[metadata] -license_files = LICENSE - [flake8] max-line-length = 100 extend-ignore = E203, E231 diff --git a/setup.py b/setup.py index a2743886..5199ae9e 100644 --- a/setup.py +++ b/setup.py @@ -1,156 +1,23 @@ # -*- coding: utf-8 -*- import sys import os.path -from setuptools import setup, find_packages, Extension -from setuptools.command.build import build -from setuptools.command.build_ext import build_ext +from setuptools import setup, Extension -try: - from setuptools.dep_util import newer_group -except ImportError: - from distutils.dep_util import newer_group -from distutils import log -from distutils.dir_util import mkpath -from distutils.util import split_quoted # add xml_parser_generator to the import path list base_dir = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(base_dir, "xml_parser_generator")) -import make_parser +from setuptools_builder import CustomBuild, CustomBuildExt -# Keep in sync with breathe/__init__.py __version__ -__version__ = "4.35.0" long_desc = """ Breathe is an extension to reStructuredText and Sphinx to be able to read and render `Doxygen `__ xml output. """ -requires = ["Sphinx>=4.0,!=5.0.0", "docutils>=0.12"] - -if sys.version_info < (3, 8): - print("ERROR: Breathe requires at least Python 3.8 to run.") - sys.exit(1) - - -extra_user_options = [ - ("cpp-opts=", None, "extra command line arguments for the compiler"), - ("ld-opts=", None, "extra command line arguments for the linker"), -] - - -class CustomBuild(build): - """Add extra parameters for 'build' to pass to 'build_ext'""" - - user_options = build.user_options + extra_user_options - - def initialize_options(self): - super().initialize_options() - self.cpp_opts = "" - self.ld_opts = "" - - def finalize_options(self): - super().finalize_options() - self.cpp_opts = split_quoted(self.cpp_opts) - self.ld_opts = split_quoted(self.ld_opts) - - -class CustomBuildExt(build_ext): - """Extend build_ext to automatically generate _parser.c""" - - user_options = build_ext.user_options + extra_user_options - - SCHEMA_FILE = os.path.join("xml_parser_generator", "schema.json") - MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.c.in") - STUBS_TEMPLATE = os.path.join("xml_parser_generator", "stubs_template.pyi.in") - MAKER_SOURCE = os.path.join("xml_parser_generator", "make_parser.py") - - DEPENDENCIES = [SCHEMA_FILE, MODULE_TEMPLATE, STUBS_TEMPLATE, MAKER_SOURCE] - - def initialize_options(self): - super().initialize_options() - self.cpp_opts = None - self.ld_opts = None - - def finalize_options(self): - if self.cpp_opts is not None: - self.cpp_opts = split_quoted(self.cpp_opts) - if self.ld_opts is not None: - self.ld_opts = split_quoted(self.ld_opts) - - self.set_undefined_options("build", ("cpp_opts", "cpp_opts"), ("ld_opts", "ld_opts")) - super().finalize_options() - - def build_extensions(self): - assert len(self.extensions) == 1 - - if not self.debug: - # The parser doesn't do any complicated calculation; its speed will - # mostly depend on file read and memory allocation speed. Thus it's - # better to optimize for size. 
- c = self.compiler.compiler_type - if c == "msvc": - self.extensions[0].extra_compile_args = ["/O1"] - elif c in {"unix", "cygwin", "mingw32"}: - self.extensions[0].extra_compile_args = ["-Os"] - self.extensions[0].extra_link_args = ["-s"] - - source = os.path.join(self.build_temp, self.extensions[0].name + ".c") - - # put the stub file in the same place that the extension module will be - ext_dest = self.get_ext_fullpath(self.extensions[0].name) - libdir = os.path.dirname(ext_dest) - stub = os.path.join(libdir, self.extensions[0].name + ".pyi") - - mkpath(self.build_temp, dry_run=self.dry_run) - mkpath(libdir, dry_run=self.dry_run) - - if ( - self.force - or newer_group(self.DEPENDENCIES, source) - or newer_group(self.DEPENDENCIES, stub) - ): - log.info(f'generating "{source}" and "{stub}" from templates') - if not self.dry_run: - make_parser.generate_from_json( - self.SCHEMA_FILE, self.MODULE_TEMPLATE, self.STUBS_TEMPLATE, source, stub - ) - else: - log.debug(f'"{source}" and "{stub}" are up-to-date') - - self.extensions[0].sources.append(source) - - super().build_extensions() - - setup( - name="breathe", - version=__version__, - url="https://github.com/michaeljones/breathe", - download_url="https://github.com/michaeljones/breathe", - license="BSD", - author="Michael Jones", - author_email="m.pricejones@gmail.com", - description="Sphinx Doxygen renderer", long_description=long_desc, - zip_safe=False, - classifiers=[ - "Development Status :: 4 - Beta", - "Environment :: Console", - "Environment :: Web Environment", - "Framework :: Sphinx :: Extension", - "Intended Audience :: Developers", - "Intended Audience :: Education", - "License :: OSI Approved :: BSD License", - "Operating System :: OS Independent", - "Programming Language :: Python :: 3", - "Topic :: Documentation", - "Topic :: Text Processing", - "Topic :: Utilities", - ], - platforms="any", - packages=find_packages(), ext_package="breathe", ext_modules=[ Extension( @@ -166,12 +33,5 @@ def build_extensions(self): py_limited_api=True, ) ], - include_package_data=True, - entry_points={ - "console_scripts": [ - "breathe-apidoc = breathe.apidoc:main", - ], - }, - install_requires=requires, cmdclass={"build": CustomBuild, "build_ext": CustomBuildExt}, ) diff --git a/xml_parser_generator/schema.json b/xml_parser_generator/schema.json index 91f66417..7c956af2 100644 --- a/xml_parser_generator/schema.json +++ b/xml_parser_generator/schema.json @@ -291,9 +291,9 @@ "allow_text": true, "attributes": { "compoundref": {"type": "#string", "optional": true}, - "endline": {"type": "#integer"}, + "endline": {"type": "#integer", "optional": true}, "refid": {"type": "#string"}, - "startline": {"type": "#integer"} + "startline": {"type": "#integer", "optional": true} } }, "docInternalType": { diff --git a/xml_parser_generator/setuptools_builder.py b/xml_parser_generator/setuptools_builder.py new file mode 100644 index 00000000..a126e6b0 --- /dev/null +++ b/xml_parser_generator/setuptools_builder.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- + +import os.path + +try: + from setuptools.command.build import build +except ImportError: + from distutils.command.build import build + +from setuptools.command.build_ext import build_ext + +try: + from setuptools.modified import newer_group +except ImportError: + from distutils.dep_util import newer_group + +from distutils import log +from distutils.dir_util import mkpath +from distutils.util import split_quoted + + +import make_parser + + +extra_user_options = [ + ("cpp-opts=", None, "extra command line 
arguments for the compiler"), + ("ld-opts=", None, "extra command line arguments for the linker"), +] + + +class CustomBuild(build): + """Add extra parameters for 'build' to pass to 'build_ext'""" + + user_options = build.user_options + extra_user_options + + def initialize_options(self): + super().initialize_options() + self.cpp_opts = "" + self.ld_opts = "" + + def finalize_options(self): + super().finalize_options() + self.cpp_opts = split_quoted(self.cpp_opts) + self.ld_opts = split_quoted(self.ld_opts) + + +class CustomBuildExt(build_ext): + """Extend build_ext to automatically generate _parser.c""" + + user_options = build_ext.user_options + extra_user_options + + SCHEMA_FILE = os.path.join("xml_parser_generator", "schema.json") + MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.c.in") + STUBS_TEMPLATE = os.path.join("xml_parser_generator", "stubs_template.pyi.in") + MAKER_SOURCE = os.path.join("xml_parser_generator", "make_parser.py") + + DEPENDENCIES = [SCHEMA_FILE, MODULE_TEMPLATE, STUBS_TEMPLATE, MAKER_SOURCE] + + def initialize_options(self): + super().initialize_options() + self.cpp_opts = None + self.ld_opts = None + + def finalize_options(self): + if self.cpp_opts is not None: + self.cpp_opts = split_quoted(self.cpp_opts) + if self.ld_opts is not None: + self.ld_opts = split_quoted(self.ld_opts) + + self.set_undefined_options("build", ("cpp_opts", "cpp_opts"), ("ld_opts", "ld_opts")) + super().finalize_options() + + def build_extensions(self): + assert len(self.extensions) == 1 + + if not self.debug: + # The parser doesn't do any complicated calculation; its speed will + # mostly depend on file read and memory allocation speed. Thus it's + # better to optimize for size. + c = self.compiler.compiler_type + if c == "msvc": + self.extensions[0].extra_compile_args = ["/O1"] + elif c in {"unix", "cygwin", "mingw32"}: + self.extensions[0].extra_compile_args = ["-Os"] + self.extensions[0].extra_link_args = ["-s"] + + source = os.path.join(self.build_temp, self.extensions[0].name + ".c") + + # put the stub file in the same place that the extension module will be + ext_dest = self.get_ext_fullpath(self.extensions[0].name) + libdir = os.path.dirname(ext_dest) + stub = os.path.join(libdir, self.extensions[0].name + ".pyi") + + mkpath(self.build_temp, dry_run=self.dry_run) + mkpath(libdir, dry_run=self.dry_run) + + if ( + self.force + or newer_group(self.DEPENDENCIES, source) + or newer_group(self.DEPENDENCIES, stub) + ): + log.info(f'generating "{source}" and "{stub}" from templates') + if not self.dry_run: + make_parser.generate_from_json( + self.SCHEMA_FILE, self.MODULE_TEMPLATE, self.STUBS_TEMPLATE, source, stub + ) + else: + log.debug(f'"{source}" and "{stub}" are up-to-date') + + self.extensions[0].sources.append(source) + + super().build_extensions() + From 9323b9f2bce7b3205b43471cc24e1c46f8d8682b Mon Sep 17 00:00:00 2001 From: jce Date: Thu, 21 Dec 2023 13:47:10 +0100 Subject: [PATCH 47/65] Style fix --- breathe/parser/compound.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/breathe/parser/compound.py b/breathe/parser/compound.py index 6313c776..1489d60b 100644 --- a/breathe/parser/compound.py +++ b/breathe/parser/compound.py @@ -243,6 +243,8 @@ class MemberTypeSub(supermod.MemberType): def __init__(self, kind=None, refid=None, name=''): supermod.MemberType.__init__(self, kind, refid, name) + + supermod.MemberType.subclass = MemberTypeSub # end class MemberTypeSub From 4071ffcd41c50fbbc50b31c1171214ad731d2af8 Mon Sep 17 00:00:00 2001 From: jce Date: 
Thu, 21 Dec 2023 13:57:14 +0100 Subject: [PATCH 48/65] Check type of object to prevent AttributeError --- breathe/finder/compound.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index 6b01cced..d7017e4d 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -1,6 +1,7 @@ from breathe.finder import ItemFinder, stack from breathe.renderer.filter import Filter, FilterFactory from breathe.parser import DoxygenCompoundParser +from breathe.parser.compoundsuper import memberdefType from sphinx.application import Sphinx @@ -62,7 +63,8 @@ def filter_(self, ancestors, filter_: Filter, matches) -> None: # If there are members in this sectiondef that match the criteria # then load up the file for the group they're in and get the member data objects if member_matches: - matched_member_ids = (member.id for stack in matches for member in stack) + matched_member_ids = {member.id for stack in matches for member in stack + if isinstance(member, memberdefType)} member_refid = member_matches[0][0].refid filename = member_refid.rsplit('_', 1)[0] file_data = self.compound_parser.parse(filename) From 5c3b6fb587208e174be460cb46905c94e3401d53 Mon Sep 17 00:00:00 2001 From: jce Date: Thu, 21 Dec 2023 17:43:54 +0100 Subject: [PATCH 49/65] Don't warn about duplicate memberdef with the same signature --- breathe/directives/function.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/breathe/directives/function.py b/breathe/directives/function.py index b46c54a1..28712a18 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -286,7 +286,7 @@ def _resolve_function(self, matches, args: Optional[cpp.ASTParametersQualifiers] res.append((entry, signature)) - if len(res) == 1: + if len(res) == 1 or all(x == candSignatures[0] for x in candSignatures): return res[0][0] else: raise _UnableToResolveFunctionError(candSignatures) From 5f5f7ceb75dfddc2ca089a1cbd5c9fb804ae7744 Mon Sep 17 00:00:00 2001 From: jce Date: Fri, 22 Dec 2023 12:58:03 +0100 Subject: [PATCH 50/65] Prevent IndexError when no result was found due to arg spec --- breathe/directives/function.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/breathe/directives/function.py b/breathe/directives/function.py index 28712a18..4f60ab6e 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -286,7 +286,7 @@ def _resolve_function(self, matches, args: Optional[cpp.ASTParametersQualifiers] res.append((entry, signature)) - if len(res) == 1 or all(x == candSignatures[0] for x in candSignatures): + if len(res) == 1 or (len(res) > 1 and all(x[1] == res[0][1] for x in res)): return res[0][0] else: raise _UnableToResolveFunctionError(candSignatures) From 85689603f122077337b4a2e7fd53e976121d4304 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sun, 24 Dec 2023 00:56:02 -0500 Subject: [PATCH 51/65] Refactored "finders", eliminating huge bottleneck --- .github/workflows/cache_doxygen.yml | 2 +- breathe/apidoc.py | 6 +- breathe/directives/__init__.py | 31 +- breathe/directives/class_like.py | 13 +- breathe/directives/content_block.py | 85 ++- breathe/directives/file.py | 146 ++++- breathe/directives/function.py | 54 +- breathe/directives/index.py | 48 +- breathe/directives/item.py | 115 +++- breathe/directives/setup.py | 8 +- breathe/finder/__init__.py | 28 +- breathe/finder/compound.py | 24 +- breathe/finder/factory.py | 86 ++- breathe/finder/index.py | 32 +- breathe/parser.py | 142 
++-- breathe/renderer/filter.py | 715 +++++++-------------- breathe/renderer/sphinxrenderer.py | 23 +- pyproject.toml | 10 +- tests/test_filters.py | 69 +- tests/test_renderer.py | 27 +- xml_parser_generator/setuptools_builder.py | 1 - 21 files changed, 885 insertions(+), 780 deletions(-) diff --git a/.github/workflows/cache_doxygen.yml b/.github/workflows/cache_doxygen.yml index 6c36ad8d..914c86e5 100644 --- a/.github/workflows/cache_doxygen.yml +++ b/.github/workflows/cache_doxygen.yml @@ -1,4 +1,4 @@ -name: download and compile Doxygen +name: download and cache Doxygen on: workflow_call env: diff --git a/breathe/apidoc.py b/breathe/apidoc.py index c0a5db2b..41c7c61d 100644 --- a/breathe/apidoc.py +++ b/breathe/apidoc.py @@ -78,11 +78,7 @@ def write_file(name, text, args): def format_heading(level, text): """Create a heading of [1, 2 or 3 supported].""" - underlining = [ - "=", - "-", - "~", - ][ + underlining = ["=", "-", "~",][ level - 1 ] * len(text) return "%s\n%s\n\n" % (text, underlining) diff --git a/breathe/directives/__init__.py b/breathe/directives/__init__.py index 2306e7fc..6431a739 100644 --- a/breathe/directives/__init__.py +++ b/breathe/directives/__init__.py @@ -1,24 +1,25 @@ from __future__ import annotations -from breathe.finder.factory import FinderFactory +from breathe.finder import factory from breathe import parser from breathe.renderer import format_parser_error, RenderContext -from breathe.renderer.filter import FilterFactory from breathe.renderer.sphinxrenderer import SphinxRenderer from sphinx.directives import SphinxDirective # pyright: ignore from docutils import nodes -from typing import Any, TYPE_CHECKING, Sequence +from typing import Any, TYPE_CHECKING if TYPE_CHECKING: - from breathe.parser import DoxygenParserFactory + from breathe.parser import DoxygenParser from breathe.project import ProjectInfoFactory, ProjectInfo from breathe.renderer import TaggedNode from breathe.renderer.filter import DoxFilter from breathe.renderer.mask import MaskFactoryBase from breathe.renderer.target import TargetHandler + from sphinx.application import Sphinx + from collections.abc import Sequence class _WarningHandler: @@ -66,16 +67,20 @@ def project_info_factory(self) -> ProjectInfoFactory: return self.env.temp_data["breathe_project_info_factory"] @property - def parser_factory(self) -> DoxygenParserFactory: - return self.env.temp_data["breathe_parser_factory"] + def dox_parser(self) -> DoxygenParser: + return self.env.temp_data["breathe_dox_parser"] @property - def finder_factory(self) -> FinderFactory: - return FinderFactory(self.env.app, self.parser_factory) + def app(self) -> Sphinx: + return self.env.app - @property - def filter_factory(self) -> FilterFactory: - return FilterFactory(self.env.app) + def get_doxygen_index(self, project_info: ProjectInfo) -> parser.DoxygenIndex: + return self.dox_parser.parse_index(project_info) + + def create_finder_from_root( + self, root: factory.FinderRoot, project_info: ProjectInfo + ) -> factory.Finder: + return factory.create_finder_from_root(self.env.app, self.dox_parser, root, project_info) def create_warning(self, project_info: ProjectInfo | None, **kwargs) -> _WarningHandler: if project_info: @@ -101,13 +106,13 @@ def render( try: object_renderer = SphinxRenderer( - self.parser_factory.app, + self.dox_parser.app, project_info, [tn.value for tn in node_stack], self.state, self.state.document, target_handler, - self.parser_factory.create_compound_parser(project_info), + self.dox_parser, filter_, ) except parser.ParserError as e: 
diff --git a/breathe/directives/class_like.py b/breathe/directives/class_like.py index 39b67985..c6a7f62b 100644 --- a/breathe/directives/class_like.py +++ b/breathe/directives/class_like.py @@ -3,6 +3,7 @@ from breathe.directives import BaseDirective from breathe.file_state_cache import MTimeError from breathe.project import ProjectError +from breathe.renderer import filter from breathe.renderer.mask import NullMaskFactory from breathe.renderer.target import create_target_handler @@ -17,7 +18,6 @@ from typing import NotRequired, TypedDict else: from typing_extensions import NotRequired, TypedDict - from breathe import renderer from docutils.nodes import Node DoxClassOptions = TypedDict( @@ -74,22 +74,21 @@ def run(self) -> list[Node]: return warning.warn("doxygen{kind}: %s" % e) try: - finder = self.finder_factory.create_finder(project_info) + d_index = self.get_doxygen_index(project_info) except MTimeError as e: warning = self.create_warning(None, kind=self.kind) return warning.warn("doxygen{kind}: %s" % e) - finder_filter = self.filter_factory.create_compound_finder_filter(name, self.kind) - - matches: list[list[renderer.TaggedNode]] = [] - finder.filter_(finder_filter, matches) + matches: list[filter.FinderMatch] = list( + filter.compound_finder_filter(name, self.kind, d_index) + ) if len(matches) == 0: warning = self.create_warning(project_info, name=name, kind=self.kind) return warning.warn('doxygen{kind}: Cannot find class "{name}" {tail}') target_handler = create_target_handler(options, project_info, self.state.document) - filter_ = self.filter_factory.create_class_filter(name, options) + filter_ = filter.create_class_filter(self.app, name, options) mask_factory = NullMaskFactory() return self.render( diff --git a/breathe/directives/content_block.py b/breathe/directives/content_block.py index a0a3265f..7e74c858 100644 --- a/breathe/directives/content_block.py +++ b/breathe/directives/content_block.py @@ -3,7 +3,7 @@ from breathe.directives import BaseDirective from breathe.file_state_cache import MTimeError from breathe.project import ProjectError -from breathe.renderer import RenderContext +from breathe.renderer import RenderContext, filter from breathe.renderer.mask import NullMaskFactory from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.target import create_target_handler @@ -21,8 +21,8 @@ from typing import NotRequired, TypedDict else: from typing_extensions import NotRequired, TypedDict - from breathe.renderer import TaggedNode from breathe.finder.factory import FinderRoot + from sphinx.application import Sphinx DoxContentBlockOptions = TypedDict( "DoxContentBlockOptions", @@ -46,6 +46,66 @@ FinderRoot = None +def create_render_filter( + app: Sphinx, kind: Literal["group", "page", "namespace"], options: DoxContentBlockOptions +) -> filter.DoxFilter: + """Render filter for group & namespace blocks""" + + filter_options = filter.set_defaults(app, options) + + if "desc-only" in filter_options: + return filter.create_description_filter(True, parser.Node_compounddefType) + + cm_filter = filter.create_class_member_filter(filter_options) + ic_filter = filter.create_innerclass_filter(filter_options) + o_filter = filter.create_outline_filter(filter_options) + + def filter_(nstack: filter.NodeStack) -> bool: + grandparent = nstack.ancestor(2) + return ( + ( + cm_filter(nstack) + or ( + isinstance(grandparent, parser.Node_compounddefType) + and grandparent.kind not in filter.CLASS_LIKE_COMPOUNDDEF + and isinstance(nstack.node, parser.Node_memberdefType) + ) 
+ ) + and ic_filter(nstack) + and o_filter(nstack) + ) + + return filter_ + + +def create_content_filter(kind: Literal["group", "page", "namespace"]) -> filter.DoxFilter: + """Returns a filter which matches the contents of the or namespace but not the group or + namepace name or description. + + This allows the groups to be used to structure sections of the documentation rather than to + structure and further document groups of documentation + + As a finder/content filter we only need to match exactly what we're interested in. + """ + + def filter_(nstack: filter.NodeStack) -> bool: + node = nstack.node + parent = nstack.parent + + if isinstance(node, parser.Node_memberdefType): + return node.prot == parser.DoxProtectionKind.public + + return ( + isinstance(node, parser.Node_refType) + and isinstance(parent, parser.Node_compounddefType) + and parent.kind.value == kind + and nstack.tag == "innerclass" + and node.prot == parser.DoxProtectionKind.public + ) + + return filter_ + + class _DoxygenContentBlockDirective(BaseDirective): """Base class for namespace and group directives which have very similar behaviours""" @@ -79,15 +139,14 @@ def run(self) -> list[Node]: return warning.warn("doxygen{kind}: %s" % e) try: - finder = self.finder_factory.create_finder(project_info) + d_index = self.get_doxygen_index(project_info) except MTimeError as e: warning = self.create_warning(None, kind=self.kind) return warning.warn("doxygen{kind}: %s" % e) - finder_filter = self.filter_factory.create_finder_filter(self.kind, name) - - matches: list[list[TaggedNode]] = [] - finder.filter_(finder_filter, matches) + matches: list[filter.FinderMatch] = list( + filter.compound_finder_filter(name, self.kind, d_index) + ) # It shouldn't be possible to have too many matches as namespaces & groups in their nature # are merged together if there are multiple declarations, so we only check for no matches @@ -99,32 +158,32 @@ def run(self) -> list[Node]: # Unpack the single entry in the matches list (node_stack,) = matches - filter_ = self.filter_factory.create_content_filter(self.kind, options) + filter_ = create_content_filter(self.kind) # Having found the compound node for the namespace or group in the index we want to grab # the contents of it which match the filter - contents_finder = self.finder_factory.create_finder_from_root( + contents_finder = self.create_finder_from_root( cast(FinderRoot, node_stack[0].value), project_info ) - contents: list[list[TaggedNode]] = [] + contents: list[filter.FinderMatch] = [] contents_finder.filter_(filter_, contents) # Replaces matches with our new starting points matches = contents target_handler = create_target_handler(options, project_info, self.state.document) - filter_ = self.filter_factory.create_render_filter(self.kind, options) + filter_ = create_render_filter(self.app, self.kind, options) node_list: list[Node] = [] for node_stack in matches: object_renderer = SphinxRenderer( - self.parser_factory.app, + self.dox_parser.app, project_info, [item.value for item in node_stack], self.state, self.state.document, target_handler, - self.parser_factory.create_compound_parser(project_info), + self.dox_parser, filter_, ) diff --git a/breathe/directives/file.py b/breathe/directives/file.py index 1644ab33..71bf5e2a 100644 --- a/breathe/directives/file.py +++ b/breathe/directives/file.py @@ -1,15 +1,139 @@ from __future__ import annotations +import os.path + from ..renderer.mask import NullMaskFactory from ..directives import BaseDirective -from breathe import project +from breathe 
import project, path_handler, renderer, parser -from breathe import renderer, parser from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.target import create_target_handler +from breathe.renderer import filter from docutils.parsers.rst.directives import unchanged_required, flag +from typing import Any, TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Iterable, Mapping + from docutils.nodes import Node + + +def path_matches(location: str, target_file: str) -> bool: + if path_handler.includes_directory(target_file): + # If the target_file contains directory separators then + # match against the same length at the end of the location + # + location_match = location[-len(target_file) :] + return location_match == target_file + + # If there are no separators, match against the whole filename + # at the end of the location + # + # This is to prevent "Util.cpp" matching "PathUtil.cpp" + # + location_basename = os.path.basename(location) + return location_basename == target_file + + +def location_matches(location: parser.Node_locationType | None, target_file: str) -> bool: + return location is not None and path_matches(location.file, target_file) + + +def namespace_matches(name: str, node: parser.Node_compounddefType): + to_find = name.rpartition("::")[0] + return any(to_find == "".join(ns) for ns in node.innernamespace) or any( + to_find == "".join(ns) for ns in node.innerclass + ) + + +def create_file_filter( + filename: str, + options: Mapping[str, Any], + *, + init_valid_names: Iterable[str] | None = None, +) -> filter.DoxFilter: + valid_names: set[str] = set() + if init_valid_names: + valid_names.update(init_valid_names) + + outline_filter = filter.create_outline_filter(options) + + def filter_(nstack: filter.NodeStack) -> bool: + if not outline_filter(nstack): + return False + + node = nstack.node + parent = nstack.parent + if isinstance(node, parser.Node_compounddefType): + if node.kind == parser.DoxCompoundKind.file: + # Gather the "namespaces" attribute from the + # compounddef for the file we're rendering and + # store the information in the "valid_names" list + if location_matches(node.location, filename): + valid_names.update("".join(ns) for ns in node.innernamespace) + valid_names.update("".join(ns) for ns in node.innerclass) + + if node.kind != parser.DoxCompoundKind.namespace: + # Ignore compounddefs which are from another file + # (normally means classes and structs which are in a + # namespace that we have other interests in) but only + # check it if the compounddef is not a namespace + # itself, as for some reason compounddefs for + # namespaces are registered with just a single file + # location even if they namespace is spread over + # multiple files + return location_matches(node.location, filename) + + elif isinstance(node, parser.Node_refType): + name = "".join(node) + if isinstance(parent, parser.Node_compounddefType) and nstack.tag in { + "innerclass", + "innernamespace", + }: + # Take the valid_names and every time we handle an + # innerclass or innernamespace, check that its name + # was one of those initial valid names so that we + # never end up rendering a namespace or class that + # wasn't in the initial file. Notably this is + # required as the location attribute for the + # namespace in the xml is unreliable. 
+ if name not in valid_names: + return False + + # Ignore innerclasses and innernamespaces that are inside a + # namespace that is going to be rendered as they will be + # rendered with that namespace and we don't want them twice + if namespace_matches(name, parent): + return False + + elif isinstance(node, parser.Node_memberdefType): + # Ignore memberdefs from files which are different to + # the one we're rendering. This happens when we have to + # cross into a namespace xml file which has entries + # from multiple files in it + return path_matches(node.location.file, filename) + + return True + + return filter_ + + +def file_finder_filter( + filename: str, + d_parser: parser.DoxygenParser, + project_info: project.ProjectInfo, + index: parser.DoxygenIndex, + matches: list[filter.FinderMatch], +) -> None: + for c in index.file_compounds: + if not path_matches(c.name, filename): + continue + for cd in d_parser.parse_compound(c.refid, project_info).root.compounddef: + if cd.kind != parser.DoxCompoundKind.file: + continue + matches.append([renderer.TaggedNode(None, cd)]) + class _BaseFileDirective(BaseDirective): """Base class handle the main work when given the appropriate file and project info to work @@ -20,12 +144,10 @@ class _BaseFileDirective(BaseDirective): # information is present in the Directive class from the docutils framework that we'd have to # pass way too much stuff to a helper object to be reasonable. - def handle_contents(self, file_: str, project_info): - finder = self.finder_factory.create_finder(project_info) - finder_filter = self.filter_factory.create_file_finder_filter(file_) - - matches: list[list[renderer.TaggedNode]] = [] - finder.filter_(finder_filter, matches) + def handle_contents(self, file_: str, project_info: project.ProjectInfo) -> list[Node]: + d_index = self.get_doxygen_index(project_info) + matches: list[filter.FinderMatch] = [] + file_finder_filter(file_, self.dox_parser, project_info, d_index, matches) if len(matches) > 1: warning = self.create_warning(None, file=file_, directivename=self.directive_name) @@ -35,18 +157,18 @@ def handle_contents(self, file_: str, project_info): return warning.warn('{directivename}: Cannot find file "{file} {tail}') target_handler = create_target_handler(self.options, project_info, self.state.document) - filter_ = self.filter_factory.create_file_filter(file_, self.options) + filter_ = create_file_filter(file_, self.options) - node_list = [] + node_list: list[Node] = [] for node_stack in matches: object_renderer = SphinxRenderer( - self.parser_factory.app, + self.dox_parser.app, project_info, [tv.value for tv in node_stack], self.state, self.state.document, target_handler, - self.parser_factory.create_compound_parser(project_info), + self.dox_parser, filter_, ) diff --git a/breathe/directives/function.py b/breathe/directives/function.py index 08a4dbba..f89830bf 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -5,7 +5,7 @@ from breathe.file_state_cache import MTimeError from breathe import parser from breathe.project import ProjectError -from breathe.renderer import format_parser_error, RenderContext, mask, TaggedNode +from breathe.renderer import format_parser_error, RenderContext, mask, TaggedNode, filter from breathe.renderer.sphinxrenderer import WithContext from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.target import create_target_handler @@ -29,6 +29,7 @@ from typing_extensions import NotRequired, TypedDict from breathe import project from 
docutils.nodes import Node + from sphinx.application import Sphinx DoxFunctionOptions = TypedDict( "DoxFunctionOptions", @@ -47,6 +48,34 @@ def __init__(self, signatures: List[str]) -> None: self.signatures = signatures +def function_and_all_friend_finder_filter( + app: Sphinx, + namespace: str, + name: str, + d_parser: parser.DoxygenParser, + project_info: project.ProjectInfo, + index: parser.DoxygenIndex, + matches: list[filter.FinderMatch], +) -> None: + # Get matching functions but only ones where the parent is not a group. + # We want to skip function entries in groups as we'll find the same + # functions in a file's xml output elsewhere and having more than one + # match is confusing for our logic later on. + for f_match in filter.member_finder_filter( + app, + namespace, + name, + d_parser, + project_info, + (parser.MemberKind.function, parser.MemberKind.friend), + index, + ): + cd = f_match[2].value + assert isinstance(cd, parser.Node_compounddefType) + if cd.kind != parser.DoxCompoundKind.group: + matches.append(f_match) + + class DoxygenFunctionDirective(BaseDirective): required_arguments = 1 option_spec = { @@ -90,7 +119,7 @@ def run(self) -> List[Node]: return warning.warn("doxygenfunction: %s" % e) try: - finder = self.finder_factory.create_finder(project_info) + d_index = self.get_doxygen_index(project_info) except MTimeError as e: warning = self.create_warning(None) return warning.warn("doxygenfunction: %s" % e) @@ -108,16 +137,15 @@ def run(self) -> List[Node]: ).warn( "doxygenfunction: Unable to resolve function " '"{namespace}{function}" with arguments "{args}".\n' - "Could not parse arguments. Parsing eror is\n{cpperror}" + "Could not parse arguments. Parsing error is\n{cpperror}" ) - finder_filter = self.filter_factory.create_function_and_all_friend_finder_filter( - namespace, function_name + matchesAll: list[filter.FinderMatch] = [] + function_and_all_friend_finder_filter( + self.app, namespace, function_name, self.dox_parser, project_info, d_index, matchesAll ) - matchesAll: list[list[TaggedNode]] = [] - finder.filter_(finder_filter, matchesAll) - matches: list[list[TaggedNode]] = [] + matches: list[filter.FinderMatch] = [] for m in matchesAll: # only take functions and friend functions # ignore friend classes @@ -166,7 +194,7 @@ def run(self) -> List[Node]: ) target_handler = create_target_handler(options, project_info, self.state.document) - filter_ = self.filter_factory.create_outline_filter(options) + filter_ = filter.create_outline_filter(options) return self.render( node_stack, @@ -233,13 +261,13 @@ def _create_function_signature( try: object_renderer = SphinxRenderer( - self.parser_factory.app, + self.dox_parser.app, project_info, [tn.value for tn in node_stack], self.state, self.state.document, target_handler, - self.parser_factory.create_compound_parser(project_info), + self.dox_parser, filter_, ) except parser.ParserError as e: @@ -274,7 +302,7 @@ def _create_function_signature( def _resolve_function( self, - matches: list[list[TaggedNode]], + matches: list[filter.FinderMatch], args: cpp.ASTParametersQualifiers | None, project_info: project.ProjectInfo, ): @@ -290,7 +318,7 @@ def _resolve_function( target_handler = create_target_handler( {"no-link": ""}, project_info, self.state.document ) - filter_ = self.filter_factory.create_outline_filter(text_options) + filter_ = filter.create_outline_filter(text_options) mask_factory = mask.MaskFactory({parser.Node_paramType: mask.no_parameter_names}) # Override the directive args for this render diff --git 
a/breathe/directives/index.py b/breathe/directives/index.py index b0142df5..8724f977 100644 --- a/breathe/directives/index.py +++ b/breathe/directives/index.py @@ -3,7 +3,7 @@ from breathe.directives import BaseDirective from breathe import parser from breathe.project import ProjectError -from breathe.renderer import format_parser_error, RenderContext, TaggedNode +from breathe.renderer import format_parser_error, RenderContext, TaggedNode, filter from breathe.renderer.mask import NullMaskFactory from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.target import create_target_handler @@ -11,11 +11,43 @@ from docutils.nodes import Node from docutils.parsers.rst.directives import unchanged_required, flag +from typing import Any, TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Mapping + class RootDataObject: pass +def create_index_filter(options: Mapping[str, Any]) -> filter.DoxFilter: + outline_filter = filter.create_outline_filter(options) + + def filter_(nstack: filter.NodeStack) -> bool: + if not outline_filter(nstack): + return False + + node = nstack.node + parent = nstack.parent + return not ( + isinstance(parent, parser.Node_compounddefType) + and ( + ( + isinstance(node, parser.Node_refType) + and nstack.tag in ("innerclass", "innernamespace") + ) + or ( + parent.kind == parser.DoxCompoundKind.group + and isinstance(node, parser.Node_sectiondefType) + and node.kind == parser.DoxSectionKind.func + ) + ) + ) + + return filter_ + + class _BaseIndexDirective(BaseDirective): """Base class handle the main work when given the appropriate project info to work from.""" @@ -25,7 +57,7 @@ class _BaseIndexDirective(BaseDirective): def handle_contents(self, project_info) -> list[Node]: try: - finder = self.finder_factory.create_finder(project_info) + d_index = self.get_doxygen_index(project_info) except parser.ParserError as e: return format_parser_error( self.name, e.message, e.filename, self.state, self.lineno, True @@ -33,25 +65,23 @@ def handle_contents(self, project_info) -> list[Node]: except parser.FileIOError as e: return format_parser_error(self.name, e.error, e.filename, self.state, self.lineno) - data_object = finder.root() - target_handler = create_target_handler(self.options, project_info, self.state.document) - filter_ = self.filter_factory.create_index_filter(self.options) + filter_ = create_index_filter(self.options) object_renderer = SphinxRenderer( - self.parser_factory.app, + self.dox_parser.app, project_info, - [data_object], + [d_index.root], self.state, self.state.document, target_handler, - self.parser_factory.create_compound_parser(project_info), + self.dox_parser, filter_, ) mask_factory = NullMaskFactory() context = RenderContext( - [TaggedNode(None, data_object), TaggedNode(None, RootDataObject())], + [TaggedNode(None, d_index.root), TaggedNode(None, RootDataObject())], mask_factory, self.directive_args, ) diff --git a/breathe/directives/item.py b/breathe/directives/item.py index dcd6e5c0..d3b58f33 100644 --- a/breathe/directives/item.py +++ b/breathe/directives/item.py @@ -2,9 +2,12 @@ from breathe.directives import BaseDirective from breathe.file_state_cache import MTimeError -from breathe.project import ProjectError +from breathe.project import ProjectError, ProjectInfo from breathe.renderer.mask import NullMaskFactory from breathe.renderer.target import create_target_handler +from breathe.renderer import filter +from breathe import parser +from breathe.renderer import TaggedNode from docutils.nodes import Node @@ -20,9 
+23,6 @@ else: from typing_extensions import NotRequired, TypedDict - from breathe.renderer import TaggedNode - from breathe.renderer.filter import DoxFilter - DoxBaseItemOptions = TypedDict( "DoxBaseItemOptions", {"path": str, "project": str, "outline": NotRequired[None], "no-link": NotRequired[None]}, @@ -31,6 +31,33 @@ DoxBaseItemOptions = None +def enumvalue_finder_filter( + name: str, + d_parser: parser.DoxygenParser, + project_info: ProjectInfo, + index: parser.DoxygenIndex, +) -> filter.FinderMatchItr: + """Looks for an enumvalue with the specified name.""" + + for m, c in index.members[name]: + if m.kind != parser.MemberKind.enumvalue: + continue + + dc = d_parser.parse_compound(c.refid, project_info) + ev, mdef, sdef, cdef = dc.enumvalue_by_id[m.refid] + + TN = TaggedNode + yield [ + TN("enumvalue", ev), + TN("memberdef", mdef), + TN("sectiondef", sdef), + TN("compounddef", cdef), + TN("doxygen", dc.root), + TN("compound", c), + TN("doxygenindex", index.root), + ] + + class _DoxygenBaseItemDirective(BaseDirective): kind: ClassVar[str] @@ -44,10 +71,21 @@ class _DoxygenBaseItemDirective(BaseDirective): } has_content = False - def create_finder_filter(self, namespace: str, name: str) -> DoxFilter: - """Creates a filter to find the node corresponding to this item.""" - - return self.filter_factory.create_member_finder_filter(namespace, name, self.kind) + def finder_filter( + self, + namespace: str, + name: str, + project_info: ProjectInfo, + index: parser.DoxygenIndex, + matches: list[filter.FinderMatch], + ) -> None: + """A filter to find the node corresponding to this item.""" + + matches.extend( + filter.member_finder_filter( + self.app, namespace, name, self.dox_parser, project_info, self.kind, index + ) + ) def run(self) -> list[Node]: options = cast(DoxBaseItemOptions, self.options) @@ -61,15 +99,13 @@ def run(self) -> list[Node]: return warning.warn("doxygen{kind}: %s" % e) try: - finder = self.finder_factory.create_finder(project_info) + d_index = self.get_doxygen_index(project_info) except MTimeError as e: warning = self.create_warning(None, kind=self.kind) return warning.warn("doxygen{kind}: %s" % e) - finder_filter = self.create_finder_filter(namespace, name) - - matches: list[list[TaggedNode]] = [] - finder.filter_(finder_filter, matches) + matches: list[filter.FinderMatch] = [] + self.finder_filter(namespace, name, project_info, d_index, matches) if len(matches) == 0: display_name = "%s::%s" % (namespace, name) if namespace else name @@ -77,7 +113,7 @@ def run(self) -> list[Node]: return warning.warn('doxygen{kind}: Cannot find {kind} "{display_name}" {tail}') target_handler = create_target_handler(options, project_info, self.state.document) - filter_ = self.filter_factory.create_outline_filter(options) + filter_ = filter.create_outline_filter(options) node_stack = matches[0] mask_factory = NullMaskFactory() @@ -97,13 +133,20 @@ class DoxygenDefineDirective(_DoxygenBaseItemDirective): class DoxygenConceptDirective(_DoxygenBaseItemDirective): kind = "concept" - def create_finder_filter(self, namespace: str, name: str) -> DoxFilter: + def finder_filter( + self, + namespace: str, + name: str, + project_info: ProjectInfo, + index: parser.DoxygenIndex, + matches: list[filter.FinderMatch], + ) -> None: # Unions are stored in the xml file with their fully namespaced name # We're using C++ namespaces here, it might be best to make this file # type dependent # xml_name = "%s::%s" % (namespace, name) if namespace else name - return 
self.filter_factory.create_compound_finder_filter(xml_name, "concept") + matches.extend(filter.compound_finder_filter(xml_name, "concept", index)) class DoxygenEnumDirective(_DoxygenBaseItemDirective): @@ -113,8 +156,33 @@ class DoxygenEnumDirective(_DoxygenBaseItemDirective): class DoxygenEnumValueDirective(_DoxygenBaseItemDirective): kind = "enumvalue" - def create_finder_filter(self, namespace: str, name: str) -> DoxFilter: - return self.filter_factory.create_enumvalue_finder_filter(name) + def finder_filter( + self, + namespace: str, + name: str, + project_info: ProjectInfo, + index: parser.DoxygenIndex, + matches: list[filter.FinderMatch], + ) -> None: + for m, c in index.members[name]: + if m.kind != parser.MemberKind.enumvalue: + continue + + dc = self.dox_parser.parse_compound(c.refid, project_info) + ev, mdef, sdef, cdef = dc.enumvalue_by_id[m.refid] + + TN = TaggedNode + matches.append( + [ + TN("enumvalue", ev), + TN("memberdef", mdef), + TN("sectiondef", sdef), + TN("compounddef", cdef), + TN("doxygen", dc.root), + TN("compound", c), + TN("doxygenindex", index.root), + ] + ) class DoxygenTypedefDirective(_DoxygenBaseItemDirective): @@ -124,10 +192,17 @@ class DoxygenTypedefDirective(_DoxygenBaseItemDirective): class DoxygenUnionDirective(_DoxygenBaseItemDirective): kind = "union" - def create_finder_filter(self, namespace: str, name: str) -> DoxFilter: + def finder_filter( + self, + namespace: str, + name: str, + project_info: ProjectInfo, + index: parser.DoxygenIndex, + matches: list[filter.FinderMatch], + ) -> None: # Unions are stored in the xml file with their fully namespaced name # We're using C++ namespaces here, it might be best to make this file # type dependent # xml_name = "%s::%s" % (namespace, name) if namespace else name - return self.filter_factory.create_compound_finder_filter(xml_name, "union") + matches.extend(filter.compound_finder_filter(xml_name, "union", index)) diff --git a/breathe/directives/setup.py b/breathe/directives/setup.py index 58768f89..b2ec6a96 100644 --- a/breathe/directives/setup.py +++ b/breathe/directives/setup.py @@ -20,7 +20,7 @@ DoxygenEnumValueDirective, DoxygenTypedefDirective, ) -from breathe.parser import DoxygenParserFactory +from breathe.parser import DoxygenParser from breathe.project import ProjectInfoFactory from breathe.process import AutoDoxygenProcessHandle @@ -59,14 +59,14 @@ def setup(app: Sphinx) -> None: # note: the project_info_factory also contains some caching stuff # TODO: is that actually safe for when reading in parallel? 
project_info_factory = ProjectInfoFactory(app) - parser_factory = DoxygenParserFactory(app) + dox_parser = DoxygenParser(app) def set_temp_data( - app: Sphinx, project_info_factory=project_info_factory, parser_factory=parser_factory + app: Sphinx, project_info_factory=project_info_factory, parser_factory=dox_parser ): assert app.env is not None app.env.temp_data["breathe_project_info_factory"] = project_info_factory - app.env.temp_data["breathe_parser_factory"] = parser_factory + app.env.temp_data["breathe_dox_parser"] = parser_factory app.connect("source-read", lambda app, docname, source: set_temp_data(app)) diff --git a/breathe/finder/__init__.py b/breathe/finder/__init__.py index 139cb736..e2a07182 100644 --- a/breathe/finder/__init__.py +++ b/breathe/finder/__init__.py @@ -4,9 +4,10 @@ if TYPE_CHECKING: from breathe.project import ProjectInfo - from breathe.finder.factory import DoxygenItemFinderFactory - from breathe.renderer.filter import DoxFilter + from breathe.finder.factory import FinderCreatorMap + from breathe.renderer.filter import DoxFilter, FinderMatch from breathe.renderer import TaggedNode, T_data_object + from breathe import parser else: T_data_object = TypeVar("T_data_object", covariant=True) @@ -16,13 +17,30 @@ def __init__( self, project_info: ProjectInfo, node: TaggedNode[T_data_object], - item_finder_factory: DoxygenItemFinderFactory, + finders: FinderCreatorMap, ): self.node = node - self.item_finder_factory: DoxygenItemFinderFactory = item_finder_factory + self.finders: FinderCreatorMap = finders self.project_info = project_info + def run_filter( + self, + filter_: DoxFilter, + matches: list[FinderMatch], + node_stack: list[TaggedNode], + item: parser.NodeOrValue, + tag: str | None = None, + ) -> None: + """Adds all nodes which match the filter into the matches list""" + + item_finder = factory.create_item_finder(self.finders, self.project_info, item, tag) + item_finder.filter_(node_stack, filter_, matches) + def filter_( - self, ancestors: list[TaggedNode], filter_: DoxFilter, matches: list[list[TaggedNode]] + self, ancestors: list[TaggedNode], filter_: DoxFilter, matches: list[FinderMatch] ) -> None: raise NotImplementedError + + +# ItemFinder needs to be defined before we can import any submodules +from breathe.finder import factory # noqa: E402 diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index f347e182..76bd4a35 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -7,21 +7,20 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from breathe.renderer.filter import DoxFilter + from breathe.renderer.filter import DoxFilter, FinderMatch class DoxygenTypeSubItemFinder(ItemFinder[parser.Node_DoxygenType]): - def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches: list[FinderMatch]) -> None: """Find nodes which match the filter. 
Doesn't test this node, only its children""" node_stack = [self.node] + ancestors assert len(self.node.value.compounddef) == 1 - compound_finder = self.item_finder_factory.create_finder(self.node.value.compounddef[0]) - compound_finder.filter_(node_stack, filter_, matches) + self.run_filter(filter_, matches, node_stack, self.node.value.compounddef[0]) class CompoundDefTypeSubItemFinder(ItemFinder[parser.Node_compounddefType]): - def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches: list[FinderMatch]) -> None: """Finds nodes which match the filter and continues checks to children""" node_stack = [self.node] + ancestors @@ -29,16 +28,14 @@ def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]] matches.append(node_stack) for sectiondef in self.node.value.sectiondef: - finder = self.item_finder_factory.create_finder(sectiondef) - finder.filter_(node_stack, filter_, matches) + self.run_filter(filter_, matches, node_stack, sectiondef) for innerclass in self.node.value.innerclass: - finder = self.item_finder_factory.create_finder(innerclass, "innerclass") - finder.filter_(node_stack, filter_, matches) + self.run_filter(filter_, matches, node_stack, innerclass, "innerclass") class SectionDefTypeSubItemFinder(ItemFinder[parser.Node_sectiondefType]): - def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches: list[FinderMatch]) -> None: """Find nodes which match the filter. Doesn't test this node, only its children""" node_stack = [self.node] + ancestors @@ -46,12 +43,11 @@ def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]] matches.append(node_stack) for memberdef in self.node.value.memberdef: - finder = self.item_finder_factory.create_finder(memberdef) - finder.filter_(node_stack, filter_, matches) + self.run_filter(filter_, matches, node_stack, memberdef) class MemberDefTypeSubItemFinder(ItemFinder[parser.Node_memberdefType]): - def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches: list[FinderMatch]) -> None: data_object = self.node.value node_stack = [self.node] + ancestors @@ -66,7 +62,7 @@ def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]] class RefTypeSubItemFinder(ItemFinder[parser.Node_refType]): - def filter_(self, ancestors, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, ancestors, filter_: DoxFilter, matches: list[FinderMatch]) -> None: node_stack = [self.node] + ancestors if filter_(NodeStack(node_stack)): matches.append(node_stack) diff --git a/breathe/finder/factory.py b/breathe/finder/factory.py index 430bce91..0e6de9ef 100644 --- a/breathe/finder/factory.py +++ b/breathe/finder/factory.py @@ -1,23 +1,22 @@ from __future__ import annotations -from breathe.finder import ItemFinder from breathe.finder import index as indexfinder from breathe.finder import compound as compoundfinder from breathe import parser -from breathe.project import ProjectInfo from breathe.renderer import TaggedNode -from sphinx.application import Sphinx - from typing import Callable, TYPE_CHECKING, Union if TYPE_CHECKING: - from breathe.renderer.filter import DoxFilter + from breathe.renderer.filter import DoxFilter, FinderMatch + from breathe.project import ProjectInfo + from breathe.finder import ItemFinder + 
from sphinx.application import Sphinx - ItemFinderCreator = Callable[[ProjectInfo, TaggedNode, "DoxygenItemFinderFactory"], ItemFinder] + ItemFinderCreator = Callable[[ProjectInfo, TaggedNode, "FinderCreatorMap"], ItemFinder] + FinderCreatorMap = dict[type[parser.NodeOrValue], ItemFinderCreator] FinderRoot = Union[ - parser.Node_DoxygenTypeIndex, parser.Node_CompoundType, parser.Node_MemberType, parser.Node_DoxygenType, @@ -29,63 +28,52 @@ class _CreateCompoundTypeSubFinder: - def __init__(self, app: Sphinx, parser_factory: parser.DoxygenParserFactory): + def __init__(self, app: Sphinx, dox_parser: parser.DoxygenParser): self.app = app - self.parser_factory = parser_factory + self.dox_parser = dox_parser def __call__(self, project_info: ProjectInfo, *args) -> indexfinder.CompoundTypeSubItemFinder: - compound_parser = self.parser_factory.create_compound_parser(project_info) - return indexfinder.CompoundTypeSubItemFinder(self.app, compound_parser, project_info, *args) - + return indexfinder.CompoundTypeSubItemFinder(self.app, self.dox_parser, project_info, *args) -class DoxygenItemFinderFactory: - def __init__( - self, finders: dict[type[parser.NodeOrValue], ItemFinderCreator], project_info: ProjectInfo - ): - self.finders = finders - self.project_info = project_info - def create_finder(self, data_object: parser.NodeOrValue, tag: str | None = None) -> ItemFinder: - return self.finders[type(data_object)]( - self.project_info, TaggedNode(tag, data_object), self - ) +def create_item_finder( + finders: dict[type[parser.NodeOrValue], ItemFinderCreator], + project_info: ProjectInfo, + data_object: parser.NodeOrValue, + tag: str | None = None, +) -> ItemFinder: + return finders[type(data_object)](project_info, TaggedNode(tag, data_object), finders) class Finder: - def __init__(self, root, item_finder_factory: DoxygenItemFinderFactory) -> None: + def __init__( + self, root: parser.NodeOrValue, project_info: ProjectInfo, finders: FinderCreatorMap + ): self._root = root - self.item_finder_factory = item_finder_factory + self.project_info = project_info + self.finders = finders - def filter_(self, filter_: DoxFilter, matches: list[list[TaggedNode]]) -> None: + def filter_(self, filter_: DoxFilter, matches: list[FinderMatch]) -> None: """Adds all nodes which match the filter into the matches list""" - item_finder = self.item_finder_factory.create_finder(self._root) + item_finder = create_item_finder(self.finders, self.project_info, self._root) item_finder.filter_([], filter_, matches) def root(self): return self._root -class FinderFactory: - def __init__(self, app: Sphinx, parser_factory: parser.DoxygenParserFactory): - self.app = app - self.parser_factory = parser_factory - self.parser = parser_factory.create_index_parser() - - def create_finder(self, project_info: ProjectInfo) -> Finder: - root = self.parser.parse(project_info) - return self.create_finder_from_root(root, project_info) - - def create_finder_from_root(self, root: FinderRoot, project_info: ProjectInfo) -> Finder: - finders: dict[type[parser.NodeOrValue], ItemFinderCreator] = { - parser.Node_DoxygenTypeIndex: indexfinder.DoxygenTypeSubItemFinder, - parser.Node_CompoundType: _CreateCompoundTypeSubFinder(self.app, self.parser_factory), - parser.Node_MemberType: indexfinder.MemberTypeSubItemFinder, - parser.Node_DoxygenType: compoundfinder.DoxygenTypeSubItemFinder, - parser.Node_compounddefType: compoundfinder.CompoundDefTypeSubItemFinder, - parser.Node_sectiondefType: compoundfinder.SectionDefTypeSubItemFinder, - parser.Node_memberdefType: 
compoundfinder.MemberDefTypeSubItemFinder, - parser.Node_refType: compoundfinder.RefTypeSubItemFinder, - } - item_finder_factory = DoxygenItemFinderFactory(finders, project_info) - return Finder(root, item_finder_factory) +def create_finder_from_root( + app: Sphinx, dox_parser: parser.DoxygenParser, root: FinderRoot, project_info: ProjectInfo +) -> Finder: + finders: FinderCreatorMap = { + parser.Node_CompoundType: _CreateCompoundTypeSubFinder(app, dox_parser), + parser.Node_MemberType: indexfinder.MemberTypeSubItemFinder, + parser.Node_DoxygenType: compoundfinder.DoxygenTypeSubItemFinder, + parser.Node_compounddefType: compoundfinder.CompoundDefTypeSubItemFinder, + parser.Node_sectiondefType: compoundfinder.SectionDefTypeSubItemFinder, + parser.Node_memberdefType: compoundfinder.MemberDefTypeSubItemFinder, + parser.Node_refType: compoundfinder.RefTypeSubItemFinder, + } + + return Finder(root, project_info, finders) diff --git a/breathe/finder/index.py b/breathe/finder/index.py index 8170019e..19993c3c 100644 --- a/breathe/finder/index.py +++ b/breathe/finder/index.py @@ -1,7 +1,7 @@ from __future__ import annotations from breathe.finder import ItemFinder -from breathe.renderer.filter import FilterFactory, NodeStack +from breathe.renderer.filter import NodeStack from breathe import parser from breathe.renderer import TaggedNode @@ -10,7 +10,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from breathe.renderer.filter import DoxFilter + from breathe.renderer.filter import DoxFilter, FinderMatch class DoxygenTypeSubItemFinder(ItemFinder[parser.Node_DoxygenTypeIndex]): @@ -20,16 +20,13 @@ def filter_(self, ancestors, filter_: DoxFilter, matches) -> None: compounds = self.node.value.compound node_stack = [self.node] + ancestors for compound in compounds: - compound_finder = self.item_finder_factory.create_finder(compound) - compound_finder.filter_(node_stack, filter_, matches) + self.run_filter(filter_, matches, node_stack, compound) class CompoundTypeSubItemFinder(ItemFinder[parser.Node_CompoundType]): - def __init__(self, app: Sphinx, compound_parser: parser.DoxygenCompoundParser, *args): + def __init__(self, app: Sphinx, dox_parser: parser.DoxygenParser, *args): super().__init__(*args) - - self.filter_factory = FilterFactory(app) - self.compound_parser = compound_parser + self.dox_parser = dox_parser def filter_(self, ancestors: list[TaggedNode], filter_: DoxFilter, matches) -> None: """Finds nodes which match the filter and continues checks to children @@ -48,16 +45,16 @@ def filter_(self, ancestors: list[TaggedNode], filter_: DoxFilter, matches) -> N # Descend to member children members = self.node.value.member - member_matches: list[list[TaggedNode]] = [] + member_matches: list[FinderMatch] = [] for member in members: - member_finder = self.item_finder_factory.create_finder(member) - member_finder.filter_(node_stack, filter_, member_matches) + self.run_filter(filter_, member_matches, node_stack, member) # If there are members in this compound that match the criteria # then load up the file for this compound and get the member data objects if member_matches: - file_data = self.compound_parser.parse(self.node.value.refid) - finder = self.item_finder_factory.create_finder(file_data) + file_data = self.dox_parser.parse_compound( + self.node.value.refid, self.project_info + ).root for member_stack in member_matches: mem = member_stack[0].value @@ -68,12 +65,13 @@ def ref_filter(nstack): node = nstack.node return isinstance(node, parser.Node_memberdefType) and node.id == refid - 
finder.filter_(node_stack, ref_filter, matches) + self.run_filter(ref_filter, matches, node_stack, file_data) else: # Read in the xml file referenced by the compound and descend into that as well - file_data = self.compound_parser.parse(self.node.value.refid) - finder = self.item_finder_factory.create_finder(file_data) - finder.filter_(node_stack, filter_, matches) + file_data = self.dox_parser.parse_compound( + self.node.value.refid, self.project_info + ).root + self.run_filter(filter_, matches, node_stack, file_data) class MemberTypeSubItemFinder(ItemFinder[parser.Node_memberdefType]): diff --git a/breathe/parser.py b/breathe/parser.py index 238dea36..ea412f4a 100644 --- a/breathe/parser.py +++ b/breathe/parser.py @@ -3,6 +3,7 @@ from __future__ import annotations import reprlib +import collections from breathe import file_state_cache, path_handler from breathe.project import ProjectInfo @@ -10,11 +11,13 @@ from sphinx.application import Sphinx -from typing import overload, TYPE_CHECKING +from typing import overload, TYPE_CHECKING, TypeVar if TYPE_CHECKING: NodeOrValue = Node | str | None +T_inv = TypeVar("T_inv") + @reprlib.recursive_repr() def node_repr(self: Node) -> str: # pragma: no cover @@ -89,72 +92,95 @@ def __init__(self, error: str, filename: str): self.filename = filename -class Parser: - def __init__(self, app: Sphinx, cache: dict[str, Node_DoxygenTypeIndex | Node_DoxygenType]): +class DoxygenIndex: + def __init__(self, root: Node_DoxygenTypeIndex): + self.root = root + self.compounds: collections.defaultdict[ + str, list[Node_CompoundType] + ] = collections.defaultdict(list) + self.members: collections.defaultdict[ + str, list[tuple[Node_MemberType, Node_CompoundType]] + ] = collections.defaultdict(list) + + self.file_compounds: list[Node_CompoundType] = [] + + for c in root.compound: + self.compounds[c.name].append(c) + if c.kind == CompoundKind.file: + self.file_compounds.append(c) + for m in c.member: + self.members[m.name].append((m, c)) + + +class DoxygenCompound: + def __init__(self, root: Node_DoxygenType): + self.root = root + self.members_by_id: dict[ + str, tuple[Node_memberdefType, Node_sectiondefType, Node_compounddefType] + ] = {} + self.enumvalue_by_id: dict[ + str, + tuple[ + Node_enumvalueType, Node_memberdefType, Node_sectiondefType, Node_compounddefType + ], + ] = {} + + for c in root.compounddef: + for s in c.sectiondef: + for m in s.memberdef: + self.members_by_id[m.id] = (m, s, c) + for ev in m.enumvalue: + self.enumvalue_by_id[ev.id] = (ev, m, s, c) + + +def _parse_common(filename: str, right_tag: str) -> Node_DoxygenType | Node_DoxygenTypeIndex: + try: + with open(filename, "rb") as file: + result = parse_file(file) + if result.name != right_tag: + raise ParserError(f'expected "{right_tag}" root element, not "{result.name}"', filename) + + return result.value + except ParseError as e: + raise ParserError(e.message, filename, e.lineno) + except IOError as e: + raise FileIOError(str(e), filename) + + +class DoxygenParser: + def __init__(self, app: Sphinx) -> None: self.app = app - self.cache = cache - - def _parse_common( - self, filename: str, right_tag: str - ) -> Node_DoxygenTypeIndex | Node_DoxygenType: - try: - # Try to get from our cache - return self.cache[filename] - except KeyError: - # If that fails, parse it afresh - try: - with open(filename, "rb") as file: - result = parse_file(file) - if result.name != right_tag: - raise ParserError( - f'expected "{right_tag}" root element, not "{result.name}"', filename - ) - self.cache[filename] = 
result.value - return result.value - except ParseError as e: - raise ParserError(e.message, filename, e.lineno) - except IOError as e: - raise FileIOError(str(e), filename) - - -class DoxygenIndexParser(Parser): - def parse(self, project_info: ProjectInfo) -> Node_DoxygenTypeIndex: - filename = path_handler.resolve_path(self.app, project_info.project_path(), "index.xml") - file_state_cache.update(self.app, filename) - - r = self._parse_common(filename, "doxygenindex") - assert isinstance(r, Node_DoxygenTypeIndex) - return r - - -class DoxygenCompoundParser(Parser): - def __init__(self, app: Sphinx, cache, project_info: ProjectInfo) -> None: - super().__init__(app, cache) + self.compound_index: DoxygenIndex | None = None + self.compound_cache: dict[str, DoxygenCompound] = {} - self.project_info = project_info + def parse_index(self, project_info: ProjectInfo) -> DoxygenIndex: + r: DoxygenIndex | None = self.compound_index + if r is None: + filename = path_handler.resolve_path(self.app, project_info.project_path(), "index.xml") - def parse(self, refid: str) -> Node_DoxygenType: - filename = path_handler.resolve_path( - self.app, self.project_info.project_path(), f"{refid}.xml" - ) + file_state_cache.update(self.app, filename) - file_state_cache.update(self.app, filename) + n = _parse_common(filename, "doxygenindex") + assert isinstance(n, Node_DoxygenTypeIndex) + r = DoxygenIndex(n) - r = self._parse_common(filename, "doxygen") - assert isinstance(r, Node_DoxygenType) + self.compound_index = r return r + def parse_compound(self, refid: str, project_info: ProjectInfo) -> DoxygenCompound: + r = self.compound_cache.get(refid) + if r is None: + filename = path_handler.resolve_path( + self.app, project_info.project_path(), f"{refid}.xml" + ) -class DoxygenParserFactory: - def __init__(self, app: Sphinx) -> None: - self.app = app - self.cache: dict[str, Node_DoxygenType | Node_DoxygenTypeIndex] = {} + file_state_cache.update(self.app, filename) - def create_index_parser(self) -> DoxygenIndexParser: - return DoxygenIndexParser(self.app, self.cache) - - def create_compound_parser(self, project_info: ProjectInfo) -> DoxygenCompoundParser: - return DoxygenCompoundParser(self.app, self.cache, project_info) + n = _parse_common(filename, "doxygen") + assert isinstance(n, Node_DoxygenType) + r = DoxygenCompound(n) + self.compound_cache[refid] = r + return r @overload diff --git a/breathe/renderer/filter.py b/breathe/renderer/filter.py index 65ef0dfe..087a9b4f 100644 --- a/breathe/renderer/filter.py +++ b/breathe/renderer/filter.py @@ -45,12 +45,11 @@ from __future__ import annotations -from breathe import path_handler, parser +from breathe import parser, renderer from sphinx.application import Sphinx -import os -from typing import Any, Callable, Literal, SupportsIndex, TYPE_CHECKING, TypeVar +from typing import Any, Callable, SupportsIndex, TYPE_CHECKING, TypeVar from collections.abc import Container, Iterable, Mapping if TYPE_CHECKING: @@ -60,19 +59,22 @@ from typing import TypeAlias else: from typing_extensions import TypeAlias - from breathe import renderer + from breathe.directives.class_like import DoxClassOptions from breathe.directives.content_block import DoxContentBlockOptions + from breathe.project import ProjectInfo + + DoxFilter: TypeAlias = Callable[["NodeStack"], bool] + + FinderMatch: TypeAlias = list[renderer.TaggedNode] + FinderMatchItr: TypeAlias = Iterable[FinderMatch] + + DoxIndexFilter: TypeAlias = Callable[[parser.DoxygenIndex], FinderMatchItr] DoxNamespaceOptions: TypeAlias = 
DoxClassOptions | DoxContentBlockOptions T_options = TypeVar("T_options", DoxClassOptions, DoxContentBlockOptions) - DoxFilter: TypeAlias = Callable[["NodeStack"], bool] -else: - DoxClassOptions = None - DoxNamespaceOptions = None - CLASS_LIKE_COMPOUNDDEF = ( parser.DoxCompoundKind.class_, @@ -104,529 +106,280 @@ def tag(self) -> str: return tag -def path_matches(location: str, target_file: str) -> bool: - if path_handler.includes_directory(target_file): - # If the target_file contains directory separators then - # match against the same length at the end of the location - # - location_match = location[-len(target_file) :] - return location_match == target_file +def set_defaults(app: Sphinx, options: T_options) -> T_options: + r: Any = options.copy() + for m in app.config.breathe_default_members: + r.setdefault(m, "") + return r - # If there are no separators, match against the whole filename - # at the end of the location - # - # This is to prevent "Util.cpp" matching "PathUtil.cpp" - # - location_basename = os.path.basename(location) - return location_basename == target_file +def create_show_filter(options: Mapping[str, Any]) -> DoxFilter: + """Currently only handles the header-file entry""" -def location_matches(location: parser.Node_locationType | None, target_file: str) -> bool: - return location is not None and path_matches(location.file, target_file) + if options.get("show") == "header-file": + return lambda nstack: True + # Allow through everything except the header-file includes nodes + def filter_(nstack: NodeStack) -> bool: + return not ( + isinstance(nstack.parent, parser.Node_compounddefType) + and isinstance(nstack.node, parser.Node_incType) + ) -def namespace_matches(name: str, node: parser.Node_compounddefType): - to_find = name.rpartition("::")[0] - return any(to_find == "".join(ns) for ns in node.innernamespace) or any( - to_find == "".join(ns) for ns in node.innerclass - ) + return filter_ -class FilterFactory: - # C++ style public entries - public_kinds = set( - [ - "public-type", - "public-func", - "public-attrib", - "public-slot", - "public-static-func", - "public-static-attrib", - ] - ) +def _create_undoc_members_filter(options: DoxNamespaceOptions) -> DoxFilter: + if "undoc-members" in options: + return lambda nstack: True - def __init__(self, app: Sphinx) -> None: - self.app = app - - def set_defaults(self, options: T_options) -> T_options: - r: Any = options.copy() - for m in self.app.config.breathe_default_members: - r.setdefault(m, "") - return r - - def create_render_filter( - self, kind: Literal["group", "page", "namespace"], options: DoxContentBlockOptions - ) -> DoxFilter: - """Render filter for group & namespace blocks""" - - filter_options = self.set_defaults(options) - - if "desc-only" in filter_options: - return self._create_description_filter(True, parser.Node_compounddefType) - - cm_filter = self.create_class_member_filter(filter_options) - ic_filter = self.create_innerclass_filter(filter_options) - o_filter = self.create_outline_filter(filter_options) - - def filter(nstack: NodeStack) -> bool: - grandparent = nstack.ancestor(2) - return ( - ( - cm_filter(nstack) - or ( - isinstance(grandparent, parser.Node_compounddefType) - and grandparent.kind not in CLASS_LIKE_COMPOUNDDEF - and isinstance(nstack.node, parser.Node_memberdefType) - ) - ) - and ic_filter(nstack) - and o_filter(nstack) - ) + def filter_(nstack: NodeStack) -> bool: + node = nstack.node + # Allow anything that isn't a Node_memberdefType, or if it is only + # allow the ones with a 
description + return (not isinstance(node, parser.Node_memberdefType)) or bool( + parser.description_has_content(node.briefdescription) + or parser.description_has_content(node.detaileddescription) + ) - return filter + return filter_ - def create_class_filter(self, target: str, options: DoxClassOptions) -> DoxFilter: - """Content filter for classes based on various directive options""" - filter_options = self.set_defaults(options) +def _create_public_members_filter( + options: DoxNamespaceOptions, +) -> Callable[[parser.Node_memberdefType], bool]: + if "members" in options: + # If the user has specified the 'members' option with arguments then + # we only pay attention to that and not to any other member settings + members_str = options["members"] + if members_str and not members_str.isspace(): + # Matches sphinx-autodoc behaviour of comma separated values + members = frozenset([x.strip() for x in members_str.split(",")]) - cm_filter = self.create_class_member_filter(filter_options) - ic_filter = self.create_innerclass_filter(filter_options, outerclass=target) - o_filter = self.create_outline_filter(filter_options) - s_filter = self.create_show_filter(filter_options) + # Accept any nodes which don't have a "sectiondef" as a parent + # or, if they do, only accept them if their names are in the + # members list + def filter_(node: parser.Node_memberdefType) -> bool: + return node.name in members - return ( - lambda nstack: cm_filter(nstack) - and ic_filter(nstack) - and o_filter(nstack) - and s_filter(nstack) - ) + else: + # Select anything that doesn't have a parent which is a + # sectiondef, or, if it does, only select the public ones + def filter_(node: parser.Node_memberdefType) -> bool: + return node.prot == parser.DoxProtectionKind.public - @classmethod - def create_innerclass_filter( - cls, options: DoxNamespaceOptions, outerclass: str = "" - ) -> DoxFilter: - """ - :param outerclass: Should be the class/struct being target by the directive calling this - code. If it is a group or namespace directive then it should be left - blank. It is used when looking for names listed in the :members: option. - - The name should include any additional namespaces that the target class - is in. 
- """ - allowed: set[parser.DoxProtectionKind] = set() - if "protected-members" in options: - allowed.add(parser.DoxProtectionKind.protected) - if "private-members" in options: - allowed.add(parser.DoxProtectionKind.private) - - description = cls._create_description_filter(True, parser.Node_compounddefType) - - members: set[str] | None = None - if "members" in options: - members_str = options["members"] - if members_str and members_str.strip(): - prefix = ("%s::" % outerclass) if outerclass else "" - - # Matches sphinx-autodoc behaviour of comma separated values - members = set(["%s%s" % (prefix, x.strip()) for x in members_str.split(",")]) - else: - allowed.add(parser.DoxProtectionKind.public) - - def filter(nstack: NodeStack) -> bool: - node = nstack.node - parent = nstack.parent - - return ( - not ( - isinstance(node, parser.Node_refType) - and nstack.tag == "innerclass" - and isinstance(parent, parser.Node_compounddefType) - and parent.kind in CLASS_LIKE_COMPOUNDDEF - ) - or node.prot in allowed - or (members is not None and "".join(node) in members) - or description(nstack) - ) + else: + # Nothing with a parent that's a sectiondef + def filter_(node: parser.Node_memberdefType) -> bool: + return False - return filter + return filter_ - @staticmethod - def create_show_filter(options: Mapping[str, Any]) -> DoxFilter: - """Currently only handles the header-file entry""" - if options.get("show") == "header-file": - return lambda nstack: True +def create_description_filter(allow: bool, level: type[parser.Node]) -> DoxFilter: + """Whether or not we allow descriptions is determined by the calling function and we just do + whatever the 'allow' function parameter tells us. + """ - # Allow through everything except the header-file includes nodes - def filter(nstack: NodeStack) -> bool: - return not ( - isinstance(nstack.parent, parser.Node_compounddefType) - and isinstance(nstack.node, parser.Node_incType) + if allow: + # Let through any description children of sectiondefs if we output any kind members + def filter_(nstack: NodeStack) -> bool: + return not isinstance(nstack.parent, level) or isinstance( + nstack.node, parser.Node_descriptionType ) - return filter + else: + # Nothing with a parent that's a sectiondef + def filter_(nstack: NodeStack) -> bool: + return not isinstance(nstack.parent, level) - @staticmethod - def _create_description_filter(allow: bool, level: type[parser.Node]) -> DoxFilter: - """Whether or not we allow descriptions is determined by the calling function and we just do - whatever the 'allow' function parameter tells us. 
- """ + return filter_ - if allow: - # Let through any description children of sectiondefs if we output any kind members - def filter(nstack: NodeStack) -> bool: - return not isinstance(nstack.parent, level) or isinstance( - nstack.node, parser.Node_descriptionType - ) - else: - # Nothing with a parent that's a sectiondef - def filter(nstack: NodeStack) -> bool: - return not isinstance(nstack.parent, level) - - return filter - - @staticmethod - def _create_public_members_filter( - options: DoxNamespaceOptions, - ) -> Callable[[parser.Node_memberdefType], bool]: - if "members" in options: - # If the user has specified the 'members' option with arguments then - # we only pay attention to that and not to any other member settings - members_str = options["members"] - if members_str and not members_str.isspace(): - # Matches sphinx-autodoc behaviour of comma separated values - members = frozenset([x.strip() for x in members_str.split(",")]) - - # Accept any nodes which don't have a "sectiondef" as a parent - # or, if they do, only accept them if their names are in the - # members list - def filter(node: parser.Node_memberdefType) -> bool: - return node.name in members - - else: - # Select anything that doesn't have a parent which is a - # sectiondef, or, if it does, only select the public ones - def filter(node: parser.Node_memberdefType) -> bool: - return node.prot == parser.DoxProtectionKind.public +def create_class_member_filter(options: DoxNamespaceOptions) -> DoxFilter: + """Content filter based on :members: and :private-members: classes""" - else: - # Nothing with a parent that's a sectiondef - def filter(node: parser.Node_memberdefType) -> bool: - return False - - return filter - - @staticmethod - def _create_undoc_members_filter(options: DoxNamespaceOptions) -> DoxFilter: - if "undoc-members" in options: - return lambda nstack: True - - def filter(nstack: NodeStack) -> bool: - node = nstack.node - # Allow anything that isn't a Node_memberdefType, or if it is only - # allow the ones with a description - return (not isinstance(node, parser.Node_memberdefType)) or bool( - parser.description_has_content(node.briefdescription) - or parser.description_has_content(node.detaileddescription) - ) - - return filter + # I can't fully explain the filtering of descriptions here. More testing needed to figure + # out when it is needed. This approach reflects the old code that was here but it wasn't + # commented (my fault.) I wonder if maybe the public and private declarations themselves can + # be documented and we need to let them through. Not sure. + allow = "members" in options or "protected-members" in options or "private-members" in options - @classmethod - def create_class_member_filter(cls, options: DoxNamespaceOptions) -> DoxFilter: - """Content filter based on :members: and :private-members: classes""" + description = create_description_filter(allow, parser.Node_sectiondefType) - # I can't fully explain the filtering of descriptions here. More testing needed to figure - # out when it is needed. This approach reflects the old code that was here but it wasn't - # commented (my fault.) I wonder if maybe the public and private declarations themselves can - # be documented and we need to let them through. Not sure. 
- allow = ( - "members" in options or "protected-members" in options or "private-members" in options - ) + # Create all necessary filters and combine them + public_members = _create_public_members_filter(options) - description = cls._create_description_filter(allow, parser.Node_sectiondefType) - - # Create all necessary filters and combine them - public_members = cls._create_public_members_filter(options) - - undoc_members = cls._create_undoc_members_filter(options) - - prot_filter: tuple[parser.DoxProtectionKind, ...] = () - if "protected-members" in options: - prot_filter += (parser.DoxProtectionKind.protected,) - if "private-members" in options: - prot_filter += (parser.DoxProtectionKind.private,) - - # Allow anything that isn't a memberdef, or if it is, and 'prot' is not - # empty, allow the ones with an equal 'prot' attribute - def filter(nstack: NodeStack) -> bool: - node = nstack.node - return ( - ( - not ( - isinstance(node, parser.Node_memberdefType) - and isinstance(nstack.parent, parser.Node_sectiondefType) - ) - or (bool(prot_filter) and node.prot in prot_filter) - or public_members(node) - ) - and undoc_members(nstack) - ) or description(nstack) + undoc_members = _create_undoc_members_filter(options) - return filter + prot_filter: tuple[parser.DoxProtectionKind, ...] = () + if "protected-members" in options: + prot_filter += (parser.DoxProtectionKind.protected,) + if "private-members" in options: + prot_filter += (parser.DoxProtectionKind.private,) - @staticmethod - def create_outline_filter(options: Mapping[str, Any]) -> DoxFilter: - if "outline" in options: - return lambda nstack: not isinstance( - nstack.node, (parser.Node_descriptionType, parser.Node_incType) + # Allow anything that isn't a memberdef, or if it is, and 'prot' is not + # empty, allow the ones with an equal 'prot' attribute + def filter_(nstack: NodeStack) -> bool: + node = nstack.node + return ( + ( + not ( + isinstance(node, parser.Node_memberdefType) + and isinstance(nstack.parent, parser.Node_sectiondefType) + ) + or (bool(prot_filter) and node.prot in prot_filter) + or public_members(node) ) + and undoc_members(nstack) + ) or description(nstack) - return lambda nstack: True - - @classmethod - def create_file_filter( - cls, - filename: str, - options: Mapping[str, Any], - *, - init_valid_names: Iterable[str] | None = None, - ) -> DoxFilter: - valid_names: set[str] = set() - if init_valid_names: - valid_names.update(init_valid_names) - - outline_filter = cls.create_outline_filter(options) - - def filter(nstack: NodeStack) -> bool: - if not outline_filter(nstack): - return False - - node = nstack.node - parent = nstack.parent - if isinstance(node, parser.Node_compounddefType): - if node.kind == parser.DoxCompoundKind.file: - # Gather the "namespaces" attribute from the - # compounddef for the file we're rendering and - # store the information in the "valid_names" list - if location_matches(node.location, filename): - valid_names.update("".join(ns) for ns in node.innernamespace) - valid_names.update("".join(ns) for ns in node.innerclass) - - if node.kind != parser.DoxCompoundKind.namespace: - # Ignore compounddefs which are from another file - # (normally means classes and structs which are in a - # namespace that we have other interests in) but only - # check it if the compounddef is not a namespace - # itself, as for some reason compounddefs for - # namespaces are registered with just a single file - # location even if they namespace is spread over - # multiple files - return 
location_matches(node.location, filename) - - elif isinstance(node, parser.Node_refType): - name = "".join(node) - if isinstance(parent, parser.Node_compounddefType) and nstack.tag in { - "innerclass", - "innernamespace", - }: - # Take the valid_names and every time we handle an - # innerclass or innernamespace, check that its name - # was one of those initial valid names so that we - # never end up rendering a namespace or class that - # wasn't in the initial file. Notably this is - # required as the location attribute for the - # namespace in the xml is unreliable. - if name not in valid_names: - return False - - # Ignore innerclasses and innernamespaces that are inside a - # namespace that is going to be rendered as they will be - # rendered with that namespace and we don't want them twice - if namespace_matches(name, parent): - return False - - elif isinstance(node, parser.Node_memberdefType): - # Ignore memberdefs from files which are different to - # the one we're rendering. This happens when we have to - # cross into a namespace xml file which has entries - # from multiple files in it - return path_matches(node.location.file, filename) - - return True - - return filter - - @staticmethod - def create_content_filter( - kind: Literal["group", "page", "namespace"], options: Mapping[str, Any] - ) -> DoxFilter: - """Returns a filter which matches the contents of the or namespace but not the group or - namepace name or description. - - This allows the groups to be used to structure sections of the documentation rather than to - structure and further document groups of documentation - - As a finder/content filter we only need to match exactly what we're interested in. - """ - - def filter(nstack: NodeStack) -> bool: - node = nstack.node - parent = nstack.parent - - if isinstance(node, parser.Node_memberdefType): - return node.prot == parser.DoxProtectionKind.public + return filter_ - return ( - isinstance(node, parser.Node_refType) - and isinstance(parent, parser.Node_compounddefType) - and parent.kind.value == kind - and nstack.tag == "innerclass" - and node.prot == parser.DoxProtectionKind.public - ) - return filter - - @classmethod - def create_index_filter(cls, options: Mapping[str, Any]) -> DoxFilter: - outline_filter = cls.create_outline_filter(options) - - def filter(nstack: NodeStack) -> bool: - if not outline_filter(nstack): - return False - - node = nstack.node - parent = nstack.parent - return not ( - isinstance(parent, parser.Node_compounddefType) - and ( - ( - isinstance(node, parser.Node_refType) - and nstack.tag in ("innerclass", "innernamespace") - ) - or ( - parent.kind == parser.DoxCompoundKind.group - and isinstance(node, parser.Node_sectiondefType) - and node.kind == parser.DoxSectionKind.func - ) - ) - ) +def create_innerclass_filter(options: DoxNamespaceOptions, outerclass: str = "") -> DoxFilter: + """ + :param outerclass: Should be the class/struct being target by the directive calling this + code. If it is a group or namespace directive then it should be left + blank. It is used when looking for names listed in the :members: option. - return filter + The name should include any additional namespaces that the target class + is in. 
+ """ + allowed: set[parser.DoxProtectionKind] = set() + if "protected-members" in options: + allowed.add(parser.DoxProtectionKind.protected) + if "private-members" in options: + allowed.add(parser.DoxProtectionKind.private) - @staticmethod - def create_file_finder_filter(filename: str) -> DoxFilter: - def filter(nstack: NodeStack) -> bool: - node = nstack.node - return ( - isinstance(node, parser.Node_compounddefType) - and node.kind == parser.DoxCompoundKind.file - and location_matches(node.location, filename) - ) + description = create_description_filter(True, parser.Node_compounddefType) - return filter + members: set[str] | None = None + if "members" in options: + members_str = options["members"] + if members_str and members_str.strip(): + prefix = ("%s::" % outerclass) if outerclass else "" - def create_member_finder_filter( - self, namespace: str, name: str, kinds: Container[parser.MemberKind] | str - ) -> DoxFilter: - """Returns a filter which looks for a member with the specified name and kind.""" + # Matches sphinx-autodoc behaviour of comma separated values + members = set(["%s%s" % (prefix, x.strip()) for x in members_str.split(",")]) + else: + allowed.add(parser.DoxProtectionKind.public) - if isinstance(kinds, str): - kinds = (parser.MemberKind(kinds),) + def filter_(nstack: NodeStack) -> bool: + node = nstack.node + parent = nstack.parent - def node_matches(nstack: NodeStack) -> bool: - node = nstack.node - return ( - isinstance(node, parser.Node_MemberType) - and node.kind in kinds - and node.name == name + return ( + not ( + isinstance(node, parser.Node_refType) + and nstack.tag == "innerclass" + and isinstance(parent, parser.Node_compounddefType) + and parent.kind in CLASS_LIKE_COMPOUNDDEF ) + or node.prot in allowed + or (members is not None and "".join(node) in members) + or description(nstack) + ) - if namespace: - - def filter(nstack: NodeStack) -> bool: - parent = nstack.parent - return ( - node_matches(nstack) - and isinstance(parent, parser.Node_CompoundType) - and parent.kind - in { - parser.CompoundKind.namespace, - parser.CompoundKind.class_, - parser.CompoundKind.struct, - parser.CompoundKind.interface, - } - and parent.name == namespace - ) + return filter_ - else: - ext = tuple(self.app.config.breathe_implementation_filename_extensions) - - def filter(nstack: NodeStack) -> bool: - parent = nstack.parent - return ( - node_matches(nstack) - and isinstance(parent, parser.Node_CompoundType) - and (parent.kind != parser.CompoundKind.file or not parent.name.endswith(ext)) - ) - return filter +def create_class_filter(app: Sphinx, target: str, options: DoxClassOptions) -> DoxFilter: + """Content filter for classes based on various directive options""" - def create_function_and_all_friend_finder_filter(self, namespace: str, name: str) -> DoxFilter: - fun_finder = self.create_member_finder_filter( - namespace, name, (parser.MemberKind.function, parser.MemberKind.friend) - ) + filter_options = set_defaults(app, options) - # Get matching functions but only ones where the parent is not a group. - # We want to skip function entries in groups as we'll find the same - # functions in a file's xml output elsewhere and having more than one - # match is confusing for our logic later on. 
- def filter(nstack: NodeStack) -> bool: - if not fun_finder(nstack): - return False - - parent = nstack.parent - return not ( - isinstance(parent, parser.Node_CompoundType) - and parent.kind == parser.CompoundKind.group - ) + cm_filter = create_class_member_filter(filter_options) + ic_filter = create_innerclass_filter(filter_options, outerclass=target) + o_filter = create_outline_filter(filter_options) + s_filter = create_show_filter(filter_options) - return filter + return ( + lambda nstack: cm_filter(nstack) + and ic_filter(nstack) + and o_filter(nstack) + and s_filter(nstack) + ) - @staticmethod - def create_enumvalue_finder_filter(name: str) -> DoxFilter: - """Returns a filter which looks for an enumvalue with the specified name.""" - def filter(nstack: NodeStack): - node = nstack.node - return isinstance(node, parser.Node_enumvalueType) and node.name == name +def create_outline_filter(options: Mapping[str, Any]) -> DoxFilter: + if "outline" in options: + return lambda nstack: not isinstance( + nstack.node, (parser.Node_descriptionType, parser.Node_incType) + ) - return filter + return lambda nstack: True + + +def extend_member_with_compound( + d_parser: parser.DoxygenParser, + project_info: ProjectInfo, + m: parser.Node_MemberType, + c: parser.Node_CompoundType, + index: parser.Node_DoxygenTypeIndex, +) -> FinderMatch: + dc = d_parser.parse_compound(c.refid, project_info) + mdef, sdef, cdef = dc.members_by_id[m.refid] + + TN = renderer.TaggedNode + return [ + TN("memberdef", mdef), + TN("sectiondef", sdef), + TN("compounddef", cdef), + TN("doxygen", dc.root), + TN("compound", c), + TN("doxygenindex", index), + ] + + +def member_finder_filter( + app: Sphinx, + namespace: str, + name: str, + d_parser: parser.DoxygenParser, + project_info: ProjectInfo, + kinds: Container[parser.MemberKind] | str, + index: parser.DoxygenIndex, +) -> FinderMatchItr: + """Looks for a member with the specified name and kind.""" + + if isinstance(kinds, str): + kinds = (parser.MemberKind(kinds),) + + if namespace: + c_kinds = { + parser.CompoundKind.namespace, + parser.CompoundKind.class_, + parser.CompoundKind.struct, + parser.CompoundKind.interface, + } + + for m, c in index.members[name]: + if c.kind in c_kinds and c.name == namespace: + if m.kind in kinds: + yield extend_member_with_compound(d_parser, project_info, m, c, index.root) - @staticmethod - def create_compound_finder_filter(name: str, kind: str) -> DoxFilter: - """Returns a filter which looks for a compound with the specified name and kind.""" + else: + ext = tuple(app.config.breathe_implementation_filename_extensions) + + for m, c in index.members[name]: + if c.kind != parser.CompoundKind.file or not c.name.endswith(ext): + if m.kind in kinds: + yield extend_member_with_compound(d_parser, project_info, m, c, index.root) - def filter(nstack: NodeStack): - node = nstack.node - return ( - isinstance(node, parser.Node_CompoundType) - and node.kind.value == kind - and node.name == name - ) - return filter +def compound_finder_filter( + name: str, + kind: str, + index: parser.DoxygenIndex, +) -> FinderMatchItr: + """Looks for a compound with the specified name and kind.""" - @classmethod - def create_finder_filter( - cls, kind: Literal["group", "page", "namespace"], name: str - ) -> DoxFilter: - """Returns a filter which looks for the compound node from the index which is a group node - (kind=group) and has the appropriate name + for c in index.compounds[name]: + if c.kind.value != kind: + continue - The compound node should reference the group file 
which we can parse for the group - contents. - """ - return cls.create_compound_finder_filter(name, kind) + yield [ + renderer.TaggedNode("compound", c), + renderer.TaggedNode("doxygenindex", index.root), + ] diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index 93ad167f..00839863 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -664,7 +664,7 @@ def __init__( state, document: nodes.document, target_handler: TargetHandler, - compound_parser: parser.DoxygenCompoundParser, + dox_parser: parser.DoxygenParser, filter_: DoxFilter, ): self.app = app @@ -675,7 +675,7 @@ def __init__( self.state = state self.document = document self.target_handler = target_handler - self.compound_parser = compound_parser + self.dox_parser = dox_parser self.filter_ = filter_ self.context: RenderContext | None = None @@ -731,6 +731,9 @@ def get_refid(self, refid: str) -> str: else: return refid + def parse_compound(self, refid: str) -> parser.Node_DoxygenType: + return self.dox_parser.parse_compound(refid, self.project_info).root + def get_domain(self) -> str: """Returns the domain for the current node.""" @@ -750,7 +753,7 @@ def get_filename(node) -> Optional[str]: node = node_stack[1].value filename = get_filename(node) if not filename and isinstance(node, parser.Node_CompoundType): - file_data = self.compound_parser.parse(node.refid) + file_data = self.parse_compound(node.refid) filename = get_filename(file_data.compounddef) return self.project_info.domain_for_file(filename) if filename else "" @@ -815,7 +818,9 @@ def run_directive( signode = finder.declarator if self.context.child: - signode.children = [n for n in signode.children if not n.tagname == "desc_addname"] # type: ignore + signode.children = [ + n for n in signode.children if n.tagname != "desc_addname" + ] # type: ignore return nodes def handle_compounddef_declaration( @@ -1283,7 +1288,7 @@ def visit_doxygendef(self, node: parser.Node_DoxygenType) -> list[nodes.Node]: def visit_union(self, node: HasRefID) -> list[nodes.Node]: # Read in the corresponding xml file and process - file_data = self.compound_parser.parse(node.refid) + file_data = self.parse_compound(node.refid) assert len(file_data.compounddef) == 1 nodeDef = file_data.compounddef[0] @@ -1306,7 +1311,7 @@ def visit_union(self, node: HasRefID) -> list[nodes.Node]: def visit_class(self, node: HasRefID) -> list[nodes.Node]: # Read in the corresponding xml file and process - file_data = self.compound_parser.parse(node.refid) + file_data = self.parse_compound(node.refid) assert len(file_data.compounddef) == 1 nodeDef = file_data.compounddef[0] @@ -1374,7 +1379,7 @@ def visit_class(self, node: HasRefID) -> list[nodes.Node]: def visit_namespace(self, node: HasRefID) -> list[nodes.Node]: # Read in the corresponding xml file and process - file_data = self.compound_parser.parse(node.refid) + file_data = self.parse_compound(node.refid) assert len(file_data.compounddef) == 1 nodeDef = file_data.compounddef[0] @@ -1418,7 +1423,7 @@ def visit_compound( | None = None, ) -> list[nodes.Node]: # Read in the corresponding xml file and process - file_data = self.compound_parser.parse(node.refid) + file_data = self.parse_compound(node.refid) assert len(file_data.compounddef) == 1 def def_get_node_info(file_data) -> tuple[str, parser.DoxCompoundKind]: @@ -1708,7 +1713,7 @@ def render_derivedcompoundref(node): if "inner" in options: for cnode in node.innergroup: - file_data = self.compound_parser.parse(cnode.refid) + file_data = 
self.parse_compound(cnode.refid) assert len(file_data.compounddef) == 1 inner = file_data.compounddef[0] addnode("innergroup", lambda: self.visit_compounddef(inner)) diff --git a/pyproject.toml b/pyproject.toml index 13e8b08d..96c2a160 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,9 +47,9 @@ extend-exclude = ''' ^/examples/.* | ^/tests/data/.* ''' -[tool.cibuildwheel] -test-requires = "pytest" -test-command = "pytest {project}/tests" +#[tool.cibuildwheel] +#test-requires = "pytest" +#test-command = "pytest {project}/tests" -[tool.cibuildwheel.linux] -before-all = "yum install -y doxygen" +#[tool.cibuildwheel.linux] +#before-all = "yum install -y doxygen" diff --git a/tests/test_filters.py b/tests/test_filters.py index 580cef28..311b971e 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -5,11 +5,10 @@ from typing import NamedTuple from breathe import parser -from breathe.renderer import TaggedNode -from breathe.renderer.filter import FilterFactory, NodeStack +from breathe.renderer import TaggedNode, filter -DEFAULT_OPTS = opts = {"path": "", "project": "", "membergroups": "", "show": ""} +DEFAULT_OPTS = {"path": "", "project": "", "membergroups": "", "show": ""} @pytest.fixture(scope="module") @@ -19,12 +18,12 @@ def class_doc(): class SampleMembers(NamedTuple): - public_field: NodeStack - public_method: NodeStack - protected_field: NodeStack - protected_method: NodeStack - private_field: NodeStack - private_method: NodeStack + public_field: filter.NodeStack + public_method: filter.NodeStack + protected_field: filter.NodeStack + protected_method: filter.NodeStack + private_field: filter.NodeStack + private_method: filter.NodeStack @pytest.fixture @@ -35,7 +34,7 @@ def members(class_doc): for sect in class_doc.compounddef[0].sectiondef: member = sect.memberdef[0] - memberdefs[member.name] = NodeStack( + memberdefs[member.name] = filter.NodeStack( [TaggedNode(None, member), TaggedNode(None, sect)] + common ) @@ -52,7 +51,7 @@ def members(class_doc): def create_class_filter(app, extra_ops): opts = DEFAULT_OPTS.copy() opts.update(extra_ops) - return FilterFactory(app).create_class_filter("Sample", opts) + return filter.create_class_filter(app, "Sample", opts) def test_members(app, members): @@ -112,27 +111,27 @@ def test_specific_class_members(app, members): assert not filter(members.private_method) -def test_nested_class_filtered(app): - app.config.breathe_default_members = [] - - doc = parser.parse_str( - """ - - sample.hpp - Sample - Sample::Inner - - - """ - ) - - compounddef = doc.value.compounddef[0] - ref_outer, ref_inner = compounddef.innerclass - - filter = FilterFactory(app).create_file_filter( - "sample.hpp", DEFAULT_OPTS, init_valid_names=("Sample", "Sample::Inner") - ) - assert filter(NodeStack([TaggedNode("innerclass", ref_outer), TaggedNode(None, compounddef)])) - assert not filter( - NodeStack([TaggedNode("innerclass", ref_inner), TaggedNode(None, compounddef)]) - ) +# def test_nested_class_filtered(app): +# app.config.breathe_default_members = [] +# +# doc = parser.parse_str( +# """ +# +# sample.hpp +# Sample +# Sample::Inner +# +# +# """ +# ) +# +# compounddef = doc.value.compounddef[0] +# ref_outer, ref_inner = compounddef.innerclass +# +# filter_ = filter.create_file_filter( +# app, "sample.hpp", DEFAULT_OPTS, init_valid_names=("Sample", "Sample::Inner") +# ) +# assert filter_(filter.NodeStack([TaggedNode("innerclass", ref_outer), TaggedNode(None, compounddef)])) +# assert not filter_( +# filter.NodeStack([TaggedNode("innerclass", ref_inner), 
TaggedNode(None, compounddef)]) +# ) diff --git a/tests/test_renderer.py b/tests/test_renderer.py index 895ff62c..53f78eb4 100644 --- a/tests/test_renderer.py +++ b/tests/test_renderer.py @@ -10,6 +10,11 @@ import docutils.parsers.rst from docutils import frontend, nodes, utils +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from breathe.renderer import filter + sphinx.locale.init([], "") COMMON_ARGS_memberdefType = { @@ -29,13 +34,13 @@ def __init__(self): class MockState: def __init__(self, app): from breathe.project import ProjectInfoFactory - from breathe.parser import DoxygenParserFactory + from breathe.parser import DoxygenParser env = sphinx.environment.BuildEnvironment(app) env.setup(app) env.temp_data["docname"] = "mock-doc" env.temp_data["breathe_project_info_factory"] = ProjectInfoFactory(app) - env.temp_data["breathe_parser_factory"] = DoxygenParserFactory(app) + env.temp_data["breathe_dox_parser"] = DoxygenParser(app) if hasattr(frontend, "get_default_settings"): settings = frontend.get_default_settings(docutils.parsers.rst.Parser) else: @@ -130,9 +135,13 @@ class MockFileData: def __init__(self, compounddef): self.compounddef = [compounddef] - def parse(self, compoundname): + class MockCompound: + def __init__(self, root): + self.root = root + + def parse_compound(self, compoundname, project_info): compounddef = self.compound_dict[compoundname] - return self.MockFileData(compounddef) + return self.MockCompound(self.MockFileData(compounddef)) class NodeFinder(nodes.NodeVisitor): @@ -207,7 +216,7 @@ def test_find_node(): def render( - app, member_def, domain=None, show_define_initializer=False, compound_parser=None, options=[] + app, member_def, domain=None, show_define_initializer=False, dox_parser=None, options=[] ): """Render Doxygen *member_def* with *renderer_class*.""" @@ -225,7 +234,7 @@ def render( None, # state None, # document MockTargetHandler(), - compound_parser, + dox_parser, (lambda nstack: True), ) r.context = MockContext(app, [renderer.TaggedNode(None, member_def)], domain, options) @@ -468,11 +477,11 @@ def test_render_innergroup(app): ) assert all( el.astext() != "InnerGroup" - for el in render(app, compound_def, compound_parser=mock_compound_parser) + for el in render(app, compound_def, dox_parser=mock_compound_parser) ) assert any( el.astext() == "InnerGroup" - for el in render(app, compound_def, compound_parser=mock_compound_parser, options=["inner"]) + for el in render(app, compound_def, dox_parser=mock_compound_parser, options=["inner"]) ) @@ -502,7 +511,7 @@ def get_directive(app): return DoxygenFunctionDirective(*cls_args) -def get_matches(datafile) -> tuple[list[str], list[list[renderer.TaggedNode]]]: +def get_matches(datafile) -> tuple[list[str], list[filter.FinderMatch]]: argsstrings = [] with open(os.path.join(os.path.dirname(__file__), "data", datafile), "rb") as fid: doc = parser.parse_file(fid) diff --git a/xml_parser_generator/setuptools_builder.py b/xml_parser_generator/setuptools_builder.py index a126e6b0..7b7674bb 100644 --- a/xml_parser_generator/setuptools_builder.py +++ b/xml_parser_generator/setuptools_builder.py @@ -110,4 +110,3 @@ def build_extensions(self): self.extensions[0].sources.append(source) super().build_extensions() - From 94d5f00db51ea41da841b578c1ecd2fbecf8d94f Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Mon, 25 Dec 2023 20:05:41 -0500 Subject: [PATCH 52/65] Parser now converts html entity tags --- .../examples/test_html_entities/compare.xml | 11 + .../examples/test_html_entities/entities.h | 26 + 
.../examples/test_html_entities/input.rst | 1 + xml_parser_generator/make_parser.py | 42 +- xml_parser_generator/module_template.c.in | 38 +- xml_parser_generator/schema.json | 500 +++++++++--------- 6 files changed, 361 insertions(+), 257 deletions(-) create mode 100644 tests/data/examples/test_html_entities/compare.xml create mode 100644 tests/data/examples/test_html_entities/entities.h create mode 100644 tests/data/examples/test_html_entities/input.rst diff --git a/tests/data/examples/test_html_entities/compare.xml b/tests/data/examples/test_html_entities/compare.xml new file mode 100644 index 00000000..6d3e0fce --- /dev/null +++ b/tests/data/examples/test_html_entities/compare.xml @@ -0,0 +1,11 @@ + + + + + + void noop + + x ¡¢£¤¥¦§¨©ª «¬­®¯°±²³´µ¶ ·¸¹º»¼½¾¿À ÁÂÃÄÅÆÇÈÉÊ ËÌÍÎÏÐÑÒÓÔ ÕÖ×ØÙÚÛÜÝÞ ßàáâãäåæçè éêëìíîïðñò óôõö÷øùúûü ýþÿƒΑΒΓΔΕΖΗ ΘΙΚΛΜΝΞΟΠΡΣΤ ΥΦΧΨΩαβγδεζ ηθικλμνξοπρς στυφχψωϑϒϖ• …′″‾⁄℘ℑℜ™ℵ ←↑→↓↔↵⇐⇑⇒⇓⇔∀ ∂∃∅∇∈∉∋∏∑−∗√ ∝∞∠∧∨∩∪∫∴∼≅≈≠ ≡≤≥⊂⊃⊄⊆⊇⊕⊗⊥⋅⌈ ⌉⌊⌋⟨⟩◊♠♣♥♦Œ œŠšŸˆ˜   ‌‍ ‎‏–—‘’‚“”„† ‡‰‹›€™’x + + + diff --git a/tests/data/examples/test_html_entities/entities.h b/tests/data/examples/test_html_entities/entities.h new file mode 100644 index 00000000..788d28a2 --- /dev/null +++ b/tests/data/examples/test_html_entities/entities.h @@ -0,0 +1,26 @@ +/** + * x ¡¢£¤¥¦§¨©ª + * «¬­®¯°±²³´µ¶ + * ·¸¹º»¼½¾¿À + * ÁÂÃÄÅÆÇÈÉÊ + * ËÌÍÎÏÐÑÒÓÔ + * ÕÖ×ØÙÚÛÜÝÞ + * ßàáâãäåæçè + * éêëìíîïðñò + * óôõö÷øùúûü + * ýþÿƒΑΒΓΔΕΖΗ + * ΘΙΚΛΜΝΞΟΠΡΣΤ + * ΥΦΧΨΩαβγδεζ + * ηθικλμνξοπρς + * στυφχψωϑϒϖ• + * …′″‾⁄℘ℑℜ™ℵ + * ←↑→↓↔↵⇐⇑⇒⇓⇔∀ + * ∂∃∅∇∈∉∋∏∑−∗√ + * ∝∞∠∧∨∩∪∫∴∼≅≈≠ + * ≡≤≥⊂⊃⊄⊆⊇⊕⊗⊥⋅⌈ + * ⌉⌊⌋⟨⟩◊♠♣♥♦Œ + * œŠšŸˆ˜   ‌‍ + * ‎‏–—‘’‚“”„† + * ‡‰‹›€&tm;'x + */ +void noop(); diff --git a/tests/data/examples/test_html_entities/input.rst b/tests/data/examples/test_html_entities/input.rst new file mode 100644 index 00000000..ba68f4ae --- /dev/null +++ b/tests/data/examples/test_html_entities/input.rst @@ -0,0 +1 @@ +.. 
doxygenfunction:: noop diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index e7e75373..37439652 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -1,5 +1,6 @@ from __future__ import annotations +import re import sys import json import enum @@ -32,6 +33,8 @@ ("empty", "None"), ] +RE_CHAR_TYPE = re.compile(r"\s*#\s*char\s*\(([^\s)]+)\s*\)\s*") + def comma_join(items: Sequence[str], indent: int = 4): if len(items) < SPLIT_LINE_ITEM_THRESHOLD: @@ -88,6 +91,10 @@ def __str__(self): def content_names(self) -> Iterable[str]: return [] + @property + def extra_args(self) -> str: + return "" + if TYPE_CHECKING: @property @@ -110,6 +117,20 @@ class SpType(BuiltinType): pass +@dataclasses.dataclass() +class CodePointType(BuiltinType): + char: int + + def __init__(self, char: int): + self.name = "const_char" + self.py_name = "str" + self.char = char + + @property + def extra_args(self) -> str: + return f",{self.char:#x}" + + @dataclasses.dataclass() class BuiltinAttributeType(BuiltinType, AttributeType): pass @@ -187,9 +208,13 @@ def py_union_ref(self) -> list[str]: def py_union_list(self) -> list[str]: by_type = collections.defaultdict(list) + needs_str = False for name, t in self.content.items(): assert isinstance(t, SchemaType) - by_type[t.py_name].append(name) + if not isinstance(t, (SpType, CodePointType)): + by_type[t.py_name].append(name) + else: + needs_str = True types = [ "TaggedValue[Literal[{}], {}]".format( comma_join(sorted(f"'{n}'" for n in names), 26), t @@ -204,6 +229,11 @@ def py_union_list(self) -> list[str]: str_included = True if self.allow_text and not str_included: types.append("str") + elif needs_str: + raise ValueError( + f'type "{self.name}" cannot have #spType or ' + + '#char(...) 
items unless "allow_text" is true' + ) return types @@ -248,6 +278,14 @@ def unknown_type_error(ref: str, context: str, is_element: bool) -> NoReturn: def check_type_ref(schema: Schema, ref: str, context: str, is_element: bool = True) -> SchemaType: t = schema.types.get(ref) if t is None: + m = RE_CHAR_TYPE.fullmatch(ref) + if m is not None: + char = int(m.group(1), 16) + if char > 0xFFFF: + raise ValueError( + f'"char" type at "{context}" must have a value between 0 and 0xFFFF' + ) + return CodePointType(char) unknown_type_error(ref, context, is_element) return t @@ -515,7 +553,7 @@ def __call__(self): "builtin_t": (lambda x: isinstance(x, BuiltinType)), "enumeration_t": (lambda x: isinstance(x, SchemaEnum)), "char_enum_t": (lambda x: isinstance(x, SchemaCharEnum)), - "appends_str": (lambda x: isinstance(x, SpType)), + "appends_str": (lambda x: isinstance(x, (SpType, CodePointType))), "used_directly": used_directly, "allow_text": allow_text, "has_attributes": has_attributes, diff --git a/xml_parser_generator/module_template.c.in b/xml_parser_generator/module_template.c.in index a5b59beb..b1810ebe 100644 --- a/xml_parser_generator/module_template.c.in +++ b/xml_parser_generator/module_template.c.in @@ -1279,6 +1279,34 @@ static int node_start_spType(parse_state *state,PyObject **dest,const XML_Char * return 1; } +static int node_start_const_char(parse_state *state,PyObject **dest,const XML_Char **attr,int c) { + parse_callbacks *cb; + PyObject *c_obj; + + for(; *attr != NULL; attr += 2) { + if(warn_unexpected_attribute(state,attr[0])) return -1; + } + + c_obj = PyUnicode_FromOrdinal(c); + if(c_obj == NULL) return -1; + if(*dest == NULL) *dest = c_obj; + else { + int r = append_str_obj(dest,c_obj); + Py_DECREF(c_obj); + if(r) return -1; + } + + cb = push_callbacks(state); + if(cb == NULL) return -1; + + cb->value = NULL; + cb->cs_call = NULL; + cb->f_call = NULL; + cb->t_call = NULL; + + return 1; +} + static void raise_dup_field_error(const char *name) { PyErr_Format(PyExc_TypeError,"received more than one value for \"%s\"",name); } @@ -1485,14 +1513,14 @@ static int node_class_child__{$ type $}(parse_state *state,{$ 'PyObject **fields { frozen_list *fl = (frozen_list*)fields[FIELD__{$ type $}__{$ cref.py_name $}]; if(frozen_list_push_object(fl,NULL,NODE_LIST_INITIAL_CAPACITY)) return -1; - return node_{$ 'start_' if cref.type is builtin_t else 'class_start__' $}{$ cref.type $}(state,fl->content + (fl->size-1),attr); + return node_{$ 'start_' if cref.type is builtin_t else 'class_start__' $}{$ cref.type $}(state,fl->content + (fl->size-1),attr{$ cref.type.extra_args $}); } //% else if(fields[FIELD__{$ type $}__{$ cref.py_name $}] != NULL) { raise_duplicate_element_error(state,"{$ cref.name $}"); return -1; } - return node_{$ 'start_' if cref.type is builtin_t else 'class_start__' $}{$ cref.type $}(state,&fields[FIELD__{$ type $}__{$ cref.py_name $}],attr); + return node_{$ 'start_' if cref.type is builtin_t else 'class_start__' $}{$ cref.type $}(state,&fields[FIELD__{$ type $}__{$ cref.py_name $}],attr{$ cref.type.extra_args $}); //% endif //% endfor //% for cname,ctype in type|content @@ -1508,7 +1536,7 @@ static int node_class_child__{$ type $}(parse_state *state,{$ 'PyObject **fields &n->base, NODE_LIST_INITIAL_CAPACITY); if(dest == NULL) return -1; - return node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}(state,dest,attr); + return node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}(state,dest,attr{$ ctype.extra_args $}); } //% elif type 
is content_union { @@ -1524,11 +1552,11 @@ static int node_class_child__{$ type $}(parse_state *state,{$ 'PyObject **fields PyObject **dest = frozen_list_push_tagged_value(state->py,TAGGED_UNION_NAME__{$ cname $},&n->base,NODE_LIST_INITIAL_CAPACITY); if(dest == NULL) return -1; //% endif - return node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}(state,dest,attr); + return node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}(state,dest,attr{$ ctype.extra_args $}); } //% else if(frozen_list_push_object(&n->base,NULL,NODE_LIST_INITIAL_CAPACITY)) return -1; - return node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}(state,n->base.content + (n->base.size-1),attr); + return node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}(state,n->base.content + (n->base.size-1),attr{$ ctype.extra_args $}); //% endif //% endfor default: diff --git a/xml_parser_generator/schema.json b/xml_parser_generator/schema.json index 7c956af2..f6cdb208 100644 --- a/xml_parser_generator/schema.json +++ b/xml_parser_generator/schema.json @@ -649,256 +649,256 @@ "formula": "docFormulaType", "ref": "docRefTextType", "emoji": "docEmojiType", - "linebreak": "#empty", - "nonbreakablespace": "#empty", - "iexcl": "#empty", - "cent": "#empty", - "pound": "#empty", - "curren": "#empty", - "yen": "#empty", - "brvbar": "#empty", - "sect": "#empty", - "umlaut": "#empty", - "copy": "#empty", - "ordf": "#empty", - "laquo": "#empty", - "not": "#empty", - "shy": "#empty", - "registered": "#empty", - "macr": "#empty", - "deg": "#empty", - "plusmn": "#empty", - "sup2": "#empty", - "sup3": "#empty", - "acute": "#empty", - "micro": "#empty", - "para": "#empty", - "middot": "#empty", - "cedil": "#empty", - "sup1": "#empty", - "ordm": "#empty", - "raquo": "#empty", - "frac14": "#empty", - "frac12": "#empty", - "frac34": "#empty", - "iquest": "#empty", - "Agrave": "#empty", - "Aacute": "#empty", - "Acirc": "#empty", - "Atilde": "#empty", - "Aumlaut": "#empty", - "Aring": "#empty", - "AElig": "#empty", - "Ccedil": "#empty", - "Egrave": "#empty", - "Eacute": "#empty", - "Ecirc": "#empty", - "Eumlaut": "#empty", - "Igrave": "#empty", - "Iacute": "#empty", - "Icirc": "#empty", - "Iumlaut": "#empty", - "ETH": "#empty", - "Ntilde": "#empty", - "Ograve": "#empty", - "Oacute": "#empty", - "Ocirc": "#empty", - "Otilde": "#empty", - "Oumlaut": "#empty", - "times": "#empty", - "Oslash": "#empty", - "Ugrave": "#empty", - "Uacute": "#empty", - "Ucirc": "#empty", - "Uumlaut": "#empty", - "Yacute": "#empty", - "THORN": "#empty", - "szlig": "#empty", - "agrave": "#empty", - "aacute": "#empty", - "acirc": "#empty", - "atilde": "#empty", - "aumlaut": "#empty", - "aring": "#empty", - "aelig": "#empty", - "ccedil": "#empty", - "egrave": "#empty", - "eacute": "#empty", - "ecirc": "#empty", - "eumlaut": "#empty", - "igrave": "#empty", - "iacute": "#empty", - "icirc": "#empty", - "iumlaut": "#empty", - "eth": "#empty", - "ntilde": "#empty", - "ograve": "#empty", - "oacute": "#empty", - "ocirc": "#empty", - "otilde": "#empty", - "oumlaut": "#empty", - "divide": "#empty", - "oslash": "#empty", - "ugrave": "#empty", - "uacute": "#empty", - "ucirc": "#empty", - "uumlaut": "#empty", - "yacute": "#empty", - "thorn": "#empty", - "yumlaut": "#empty", - "fnof": "#empty", - "Alpha": "#empty", - "Beta": "#empty", - "Gamma": "#empty", - "Delta": "#empty", - "Epsilon": "#empty", - "Zeta": "#empty", - "Eta": "#empty", - "Theta": "#empty", - "Iota": "#empty", - "Kappa": "#empty", - "Lambda": 
"#empty", - "Mu": "#empty", - "Nu": "#empty", - "Xi": "#empty", - "Omicron": "#empty", - "Pi": "#empty", - "Rho": "#empty", - "Sigma": "#empty", - "Tau": "#empty", - "Upsilon": "#empty", - "Phi": "#empty", - "Chi": "#empty", - "Psi": "#empty", - "Omega": "#empty", - "alpha": "#empty", - "beta": "#empty", - "gamma": "#empty", - "delta": "#empty", - "epsilon": "#empty", - "zeta": "#empty", - "eta": "#empty", - "theta": "#empty", - "iota": "#empty", - "kappa": "#empty", - "lambda": "#empty", - "mu": "#empty", - "nu": "#empty", - "xi": "#empty", - "omicron": "#empty", - "pi": "#empty", - "rho": "#empty", - "sigmaf": "#empty", - "sigma": "#empty", - "tau": "#empty", - "upsilon": "#empty", - "phi": "#empty", - "chi": "#empty", - "psi": "#empty", - "omega": "#empty", - "thetasym": "#empty", - "upsih": "#empty", - "piv": "#empty", - "bull": "#empty", - "hellip": "#empty", - "prime": "#empty", - "Prime": "#empty", - "oline": "#empty", - "frasl": "#empty", - "weierp": "#empty", - "imaginary": "#empty", - "real": "#empty", - "trademark": "#empty", - "alefsym": "#empty", - "larr": "#empty", - "uarr": "#empty", - "rarr": "#empty", - "darr": "#empty", - "harr": "#empty", - "crarr": "#empty", - "lArr": "#empty", - "uArr": "#empty", - "rArr": "#empty", - "dArr": "#empty", - "hArr": "#empty", - "forall": "#empty", - "part": "#empty", - "exist": "#empty", - "empty": "#empty", - "nabla": "#empty", - "isin": "#empty", - "notin": "#empty", - "ni": "#empty", - "prod": "#empty", - "sum": "#empty", - "minus": "#empty", - "lowast": "#empty", - "radic": "#empty", - "prop": "#empty", - "infin": "#empty", - "ang": "#empty", - "and": "#empty", - "or": "#empty", - "cap": "#empty", - "cup": "#empty", - "int": "#empty", - "there4": "#empty", - "sim": "#empty", - "cong": "#empty", - "asymp": "#empty", - "ne": "#empty", - "equiv": "#empty", - "le": "#empty", - "ge": "#empty", - "sub": "#empty", - "sup": "#empty", - "nsub": "#empty", - "sube": "#empty", - "supe": "#empty", - "oplus": "#empty", - "otimes": "#empty", - "perp": "#empty", - "sdot": "#empty", - "lceil": "#empty", - "rceil": "#empty", - "lfloor": "#empty", - "rfloor": "#empty", - "lang": "#empty", - "rang": "#empty", - "loz": "#empty", - "spades": "#empty", - "clubs": "#empty", - "hearts": "#empty", - "diams": "#empty", - "OElig": "#empty", - "oelig": "#empty", - "Scaron": "#empty", - "scaron": "#empty", - "Yumlaut": "#empty", - "circ": "#empty", - "tilde": "#empty", - "ensp": "#empty", - "emsp": "#empty", - "thinsp": "#empty", - "zwnj": "#empty", - "zwj": "#empty", - "lrm": "#empty", - "rlm": "#empty", - "ndash": "#empty", - "mdash": "#empty", - "lsquo": "#empty", - "rsquo": "#empty", - "sbquo": "#empty", - "ldquo": "#empty", - "rdquo": "#empty", - "bdquo": "#empty", - "dagger": "#empty", - "Dagger": "#empty", - "permil": "#empty", - "lsaquo": "#empty", - "rsaquo": "#empty", - "euro": "#empty", - "tm": "#empty" + "linebreak": "#char(A)", + "nonbreakablespace": "#char(A0)", + "iexcl": "#char(A1)", + "cent": "#char(A2)", + "pound": "#char(A3)", + "curren": "#char(A4)", + "yen": "#char(A5)", + "brvbar": "#char(A6)", + "sect": "#char(A7)", + "umlaut": "#char(A8)", + "copy": "#char(A9)", + "ordf": "#char(AA)", + "laquo": "#char(AB)", + "not": "#char(AC)", + "shy": "#char(AD)", + "registered": "#char(AE)", + "macr": "#char(AF)", + "deg": "#char(B0)", + "plusmn": "#char(B1)", + "sup2": "#char(B2)", + "sup3": "#char(B3)", + "acute": "#char(B4)", + "micro": "#char(B5)", + "para": "#char(B6)", + "middot": "#char(B7)", + "cedil": "#char(B8)", + "sup1": "#char(B9)", + 
"ordm": "#char(BA)", + "raquo": "#char(BB)", + "frac14": "#char(BC)", + "frac12": "#char(BD)", + "frac34": "#char(BE)", + "iquest": "#char(BF)", + "Agrave": "#char(C0)", + "Aacute": "#char(C1)", + "Acirc": "#char(C2)", + "Atilde": "#char(C3)", + "Aumlaut": "#char(C4)", + "Aring": "#char(C5)", + "AElig": "#char(C6)", + "Ccedil": "#char(C7)", + "Egrave": "#char(C8)", + "Eacute": "#char(C9)", + "Ecirc": "#char(CA)", + "Eumlaut": "#char(CB)", + "Igrave": "#char(CC)", + "Iacute": "#char(CD)", + "Icirc": "#char(CE)", + "Iumlaut": "#char(CF)", + "ETH": "#char(D0)", + "Ntilde": "#char(D1)", + "Ograve": "#char(D2)", + "Oacute": "#char(D3)", + "Ocirc": "#char(D4)", + "Otilde": "#char(D5)", + "Oumlaut": "#char(D6)", + "times": "#char(D7)", + "Oslash": "#char(D8)", + "Ugrave": "#char(D9)", + "Uacute": "#char(DA)", + "Ucirc": "#char(DB)", + "Uumlaut": "#char(DC)", + "Yacute": "#char(DD)", + "THORN": "#char(DE)", + "szlig": "#char(DF)", + "agrave": "#char(E0)", + "aacute": "#char(E1)", + "acirc": "#char(E2)", + "atilde": "#char(E3)", + "aumlaut": "#char(E4)", + "aring": "#char(E5)", + "aelig": "#char(E6)", + "ccedil": "#char(E7)", + "egrave": "#char(E8)", + "eacute": "#char(E9)", + "ecirc": "#char(EA)", + "eumlaut": "#char(EB)", + "igrave": "#char(EC)", + "iacute": "#char(ED)", + "icirc": "#char(EE)", + "iumlaut": "#char(EF)", + "eth": "#char(F0)", + "ntilde": "#char(F1)", + "ograve": "#char(F2)", + "oacute": "#char(F3)", + "ocirc": "#char(F4)", + "otilde": "#char(F5)", + "oumlaut": "#char(F6)", + "divide": "#char(F7)", + "oslash": "#char(F8)", + "ugrave": "#char(F9)", + "uacute": "#char(FA)", + "ucirc": "#char(FB)", + "uumlaut": "#char(FC)", + "yacute": "#char(FD)", + "thorn": "#char(FE)", + "yumlaut": "#char(FF)", + "fnof": "#char(192)", + "Alpha": "#char(391)", + "Beta": "#char(392)", + "Gamma": "#char(393)", + "Delta": "#char(394)", + "Epsilon": "#char(395)", + "Zeta": "#char(396)", + "Eta": "#char(397)", + "Theta": "#char(398)", + "Iota": "#char(399)", + "Kappa": "#char(39A)", + "Lambda": "#char(39B)", + "Mu": "#char(39C)", + "Nu": "#char(39D)", + "Xi": "#char(39E)", + "Omicron": "#char(39F)", + "Pi": "#char(3A0)", + "Rho": "#char(3A1)", + "Sigma": "#char(3A3)", + "Tau": "#char(3A4)", + "Upsilon": "#char(3A5)", + "Phi": "#char(3A6)", + "Chi": "#char(3A7)", + "Psi": "#char(3A8)", + "Omega": "#char(3A9)", + "alpha": "#char(3B1)", + "beta": "#char(3B2)", + "gamma": "#char(3B3)", + "delta": "#char(3B4)", + "epsilon": "#char(3B5)", + "zeta": "#char(3B6)", + "eta": "#char(3B7)", + "theta": "#char(3B8)", + "iota": "#char(3B9)", + "kappa": "#char(3BA)", + "lambda": "#char(3BB)", + "mu": "#char(3BC)", + "nu": "#char(3BD)", + "xi": "#char(3BE)", + "omicron": "#char(3BF)", + "pi": "#char(3C0)", + "rho": "#char(3C1)", + "sigmaf": "#char(3C2)", + "sigma": "#char(3C3)", + "tau": "#char(3C4)", + "upsilon": "#char(3C5)", + "phi": "#char(3C6)", + "chi": "#char(3C7)", + "psi": "#char(3C8)", + "omega": "#char(3C9)", + "thetasym": "#char(3D1)", + "upsih": "#char(3D2)", + "piv": "#char(3D6)", + "bull": "#char(2022)", + "hellip": "#char(2026)", + "prime": "#char(2032)", + "Prime": "#char(2033)", + "oline": "#char(203E)", + "frasl": "#char(2044)", + "weierp": "#char(2118)", + "imaginary": "#char(2111)", + "real": "#char(211C)", + "trademark": "#char(2122)", + "alefsym": "#char(2135)", + "larr": "#char(2190)", + "uarr": "#char(2191)", + "rarr": "#char(2192)", + "darr": "#char(2193)", + "harr": "#char(2194)", + "crarr": "#char(21B5)", + "lArr": "#char(21D0)", + "uArr": "#char(21D1)", + "rArr": "#char(21D2)", + "dArr": 
"#char(21D3)", + "hArr": "#char(21D4)", + "forall": "#char(2200)", + "part": "#char(2202)", + "exist": "#char(2203)", + "empty": "#char(2205)", + "nabla": "#char(2207)", + "isin": "#char(2208)", + "notin": "#char(2209)", + "ni": "#char(220B)", + "prod": "#char(220F)", + "sum": "#char(2211)", + "minus": "#char(2212)", + "lowast": "#char(2217)", + "radic": "#char(221A)", + "prop": "#char(221D)", + "infin": "#char(221E)", + "ang": "#char(2220)", + "and": "#char(2227)", + "or": "#char(2228)", + "cap": "#char(2229)", + "cup": "#char(222A)", + "int": "#char(222B)", + "there4": "#char(2234)", + "sim": "#char(223C)", + "cong": "#char(2245)", + "asymp": "#char(2248)", + "ne": "#char(2260)", + "equiv": "#char(2261)", + "le": "#char(2264)", + "ge": "#char(2265)", + "sub": "#char(2282)", + "sup": "#char(2283)", + "nsub": "#char(2284)", + "sube": "#char(2286)", + "supe": "#char(2287)", + "oplus": "#char(2295)", + "otimes": "#char(2297)", + "perp": "#char(22A5)", + "sdot": "#char(22C5)", + "lceil": "#char(2308)", + "rceil": "#char(2309)", + "lfloor": "#char(230A)", + "rfloor": "#char(230B)", + "lang": "#char(27E8)", + "rang": "#char(27E9)", + "loz": "#char(25CA)", + "spades": "#char(2660)", + "clubs": "#char(2663)", + "hearts": "#char(2665)", + "diams": "#char(2666)", + "OElig": "#char(152)", + "oelig": "#char(153)", + "Scaron": "#char(160)", + "scaron": "#char(161)", + "Yumlaut": "#char(178)", + "circ": "#char(2C6)", + "tilde": "#char(2DC)", + "ensp": "#char(2002)", + "emsp": "#char(2003)", + "thinsp": "#char(2009)", + "zwnj": "#char(200C)", + "zwj": "#char(200D)", + "lrm": "#char(200E)", + "rlm": "#char(200F)", + "ndash": "#char(2013)", + "mdash": "#char(2014)", + "lsquo": "#char(2018)", + "rsquo": "#char(2019)", + "sbquo": "#char(201A)", + "ldquo": "#char(201C)", + "rdquo": "#char(201D)", + "bdquo": "#char(201E)", + "dagger": "#char(2020)", + "Dagger": "#char(2021)", + "permil": "#char(2030)", + "lsaquo": "#char(2039)", + "rsaquo": "#char(203A)", + "euro": "#char(20AC)", + "tm": "#char(2122)" } }, "docCmdGroup": { From fb2fd792495b7e08b2e7602f254a71a4b99f1d24 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Tue, 26 Dec 2023 06:57:23 -0500 Subject: [PATCH 53/65] Minor fix and added new doc-strings --- breathe/project.py | 6 +- tests/test_parser.py | 64 ++++++++- xml_parser_generator/make_parser.py | 166 ++++++++++++++++++++-- xml_parser_generator/module_template.c.in | 4 +- 4 files changed, 213 insertions(+), 27 deletions(-) diff --git a/breathe/project.py b/breathe/project.py index bfd700ec..5c8cef29 100644 --- a/breathe/project.py +++ b/breathe/project.py @@ -134,7 +134,7 @@ def __init__(self, app: Sphinx): self._default_build_dir = str(app.doctreedir.parent) self.project_count = 0 self.project_info_store: dict[str, ProjectInfo] = {} - self.project_info_for_auto_store: dict[str, AutoProjectInfo] = {} + self.project_info_for_auto_store: dict[str, ProjectInfo] = {} self.auto_project_info_store: dict[str, AutoProjectInfo] = {} @property @@ -195,7 +195,7 @@ def create_project_info(self, options: ProjectOptions) -> ProjectInfo: self.project_info_store[path] = project_info return project_info - def store_project_info_for_auto(self, name: str, project_info: AutoProjectInfo) -> None: + def store_project_info_for_auto(self, name: str, project_info: ProjectInfo) -> None: """Stores the project info by name for later extraction by the auto directives. Stored separately to the non-auto project info objects as they should never overlap. 
@@ -203,7 +203,7 @@ def store_project_info_for_auto(self, name: str, project_info: AutoProjectInfo) self.project_info_for_auto_store[name] = project_info - def retrieve_project_info_for_auto(self, options) -> AutoProjectInfo: + def retrieve_project_info_for_auto(self, options) -> ProjectInfo: """Retrieves the project info by name for later extraction by the auto directives. Looks for the 'project' entry in the options dictionary. This is a less than ideal API but diff --git a/tests/test_parser.py b/tests/test_parser.py index 0197d6f9..01c55476 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -9,16 +9,16 @@ def test_bad_content(): Sample sample.hpp - + int int public_field public_field Sample::public_field - + - +
""" @@ -34,7 +34,7 @@ def test_malformed(): Sample sample.hpp - + int""" with pytest.raises(parser.ParseError): @@ -51,16 +51,16 @@ def test_unknown_tag(): sample.hpp - + int int public_field public_field Sample::public_field - + - + """ @@ -68,3 +68,53 @@ def test_unknown_tag(): parser.parse_str(xml) assert len(record) == 1 assert "Warning on line 5:" in str(record[0].message) + +def test_string_coalesce(): + xml = """ + + + Sample + sample.hpp + + + int + int a + + a + Sample::a + ab + + + + int + int b + + b + Sample::b + c + + + + + + """ + + doc = parser.parse_str(xml).value + assert isinstance(doc, parser.Node_DoxygenType) + members = doc.compounddef[0].sectiondef[0].memberdef + desc1 = members[0].detaileddescription + desc2 = members[1].detaileddescription + assert desc1 is not None + assert desc2 is not None + tv1 = desc1[0] + tv2 = desc2[0] + assert isinstance(tv1, parser.TaggedValue) + assert isinstance(tv2, parser.TaggedValue) + p1 = tv1.value + p2 = tv2.value + assert isinstance(p1, parser.Node_docParaType) + assert isinstance(p2, parser.Node_docParaType) + assert len(p1) == 1 + assert len(p2) == 1 + assert p1[0] == "a\N{NO-BREAK SPACE}b" + assert p2[0] == "\N{TRADE MARK SIGN}c\N{PLUS-MINUS SIGN}" diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index 37439652..2a3a687e 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -44,20 +44,64 @@ def comma_join(items: Sequence[str], indent: int = 4): class ContentType(enum.Enum): + """A value specifying how children are organized when parsing an array-type + element""" + bare = enum.auto() + """Child values are added directly to the array. + + There can only be one child type, which can be an element or text. + """ + tuple = enum.auto() + """Child elements are grouped into named tuple-like objects. + + Each batch of child elements must appear in order in the XML document. Text + content is not allowed.""" + union = enum.auto() + """Each item is either a tagged union (an instance of TaggedValue) or a + plain string""" @dataclasses.dataclass() class TypeRef: + """An XML element""" + name: str + """the name of the element as it will appear in the XML file""" + py_name: str + """The Python field name that will hold the parsed value. + + This will be different from "name" if "name" is not a valid Python + identifier. + """ + type: str | SchemaType + """While the schema is being parsed, this will be a string containing the + name of attribute's type. After parsing, this is set to the object + representing the type. + """ + is_list: bool + """Whether this element can appear more than once in its context""" + min_items: Literal[0] | Literal[1] + """If this is zero, the element is optional. + + This can only be zero or one. + """ def py_type(self, as_param=False) -> str: + """Get the Python type annotation describing the type of this element. + + If "as_param" is True, this represents a parameter type that can be + converted to the actual type. For example with a given type "T": the + generated parser uses FrozenList[T] to store arrays, but constructors + accept Iterable[T] for array fields. 
+ """ + assert isinstance(self.type, SchemaType) if self.is_list: container = "Iterable" if as_param else "FrozenList" @@ -69,12 +113,38 @@ def py_type(self, as_param=False) -> str: @dataclasses.dataclass() class Attribute: + """An XML attribute""" + name: str + """the name of the attribute as it will appear in the XML file""" + py_name: str + """The Python field name that will hold the parsed value. + + This will be different from "name" if "name" is not a valid Python + identifier. + """ + type: str | AttributeType + """While the schema is being parsed, this will be a string containing the + name of attribute's type. After parsing, this is set to the object + representing the type. + """ + optional: bool + """Whether the attribute may be omitted. + + Fields corresponding to omitted attributes are set to None""" def py_type(self, as_param=False) -> str: + """Get the Python type annotation describing the type of this attribute. + + If "as_param" is True, this represents a parameter type that can be + converted to the actual type. For example with a given type "T": the + generated parser uses FrozenList[T] to store arrays, but constructors + accept Iterable[T] for array fields. + """ + assert isinstance(self.type, SchemaType) if self.optional: return f"{self.type.py_name} | None" @@ -86,6 +156,11 @@ class SchemaType: name: str def __str__(self): + """This is equal to self.name. + + This is important for the Jinja template, which frequently uses the + names of types. + """ return self.name def content_names(self) -> Iterable[str]: @@ -93,6 +168,8 @@ def content_names(self) -> Iterable[str]: @property def extra_args(self) -> str: + """A string to add before the closing bracket of the C function call to + the type's element start handler""" return "" if TYPE_CHECKING: @@ -104,22 +181,35 @@ def py_name(self) -> str: @dataclasses.dataclass() class AttributeType(SchemaType): - pass + """A type that can be used in attributes and elements. + + When used for an element, the element will not have any attributes or child + elements. + """ @dataclasses.dataclass() class BuiltinType(SchemaType): py_name: str + """the name of the Python data type that will represent a value of this + type""" @dataclasses.dataclass() class SpType(BuiltinType): - pass + """This element represents an arbitrary character whose code point is + given in the attribute "value". + + If "value" isn't present, the character is a space. + """ @dataclasses.dataclass() class CodePointType(BuiltinType): + """This element represents a specific character.""" + char: int + """The unicode code-point of the character""" def __init__(self, char: int): self.name = "const_char" @@ -143,11 +233,24 @@ class OtherAttrAction(enum.Enum): @dataclasses.dataclass() class ElementType(SchemaType): + """An element type specified by the schema""" + bases: list[str | SchemaType] + """the types to derive from""" + attributes: dict[str, Attribute] + """XML attributes""" + other_attr: OtherAttrAction + """how to handle attributes not in "attributes" """ + children: dict[str, TypeRef] + """XML child elements""" + used_directly: bool + """Each element that is used directly, corresponds to a separate Python + class. 
If this is False, this element is only used as a base element for + other types and does not produce any Python classes.""" def fields(self) -> Iterable[TypeRef | Attribute]: yield from self.attributes.values() @@ -170,16 +273,30 @@ def py_name(self) -> str: @dataclasses.dataclass() class TagOnlyElement(ElementType): - pass + """A simple element that cannot contain text (not counting whitespace) and + does not preserve the order of its child elements""" @dataclasses.dataclass() class ListElement(ElementType): + """An element type that gets parsed as an array type. + + The items of the array depend on "content", "content_type" and "allow_text". + """ + min_items: int + content: dict[str, str | SchemaType] + """Child elements that will be stored as array items. + + While the schema is being parsed, the values will be strings containing the + names of the elements' types. After parsing, they are set to the objects + representing the types. + """ + content_type: ContentType + allow_text: bool - sp_tag: str | None = None def content_names(self) -> Iterable[str]: for b in self.bases: @@ -193,13 +310,6 @@ def all_content(self): yield from b.content.values() yield from self.content.values() - def py_item_type_union_size(self) -> int: - size = len(self.content) if self.content_type == ContentType.union else 0 - for b in self.bases: - if isinstance(b, ListElement): - size += b.py_item_type_union_size() - return size - def py_union_ref(self) -> list[str]: types = self.py_union_list() if len(types) <= 1: @@ -207,6 +317,12 @@ def py_union_ref(self) -> list[str]: return ["ListItem_" + self.name] def py_union_list(self) -> list[str]: + """Return a list of type annotations, the union of which, represent + every possible value of this array's elements. + + This assumes self.content_type == ContentType.union. + """ + assert self.content_type == ContentType.union by_type = collections.defaultdict(list) needs_str = False for name, t in self.content.items(): @@ -250,6 +366,11 @@ class EnumEntry(NamedTuple): @dataclasses.dataclass() class SchemaEnum(AttributeType): + """A type representing an enumeration. + + This type is represented in python with enum.Enum. + """ + children: list[EnumEntry] hash: HashData | None = None @@ -263,6 +384,11 @@ def py_name(self) -> str: @dataclasses.dataclass() class SchemaCharEnum(AttributeType): + """An enumeration type whose elements are single characters. + + Unlike SchemaEnum, the values are represented as strings. 
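To make the ContentType.union behaviour described above concrete, here is a rough sketch of how consumer code can walk such a list. It assumes, as the docstrings say, that each item is either a plain string or a TaggedValue name/value pair; the "#text" tag used here is only illustrative:

    def walk_union_items(items):
        # items: e.g. the children of a Node_docParaType instance
        for item in items:
            if isinstance(item, str):
                yield "#text", item
            else:
                # a TaggedValue: the element name and its parsed value
                yield item.name, item.value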
+ """ + values: str @property @@ -276,14 +402,16 @@ def unknown_type_error(ref: str, context: str, is_element: bool) -> NoReturn: def check_type_ref(schema: Schema, ref: str, context: str, is_element: bool = True) -> SchemaType: + """Get the schema type that represent the type named by "ref" """ + t = schema.types.get(ref) if t is None: m = RE_CHAR_TYPE.fullmatch(ref) if m is not None: char = int(m.group(1), 16) - if char > 0xFFFF: + if char > 0x10FFFF: raise ValueError( - f'"char" type at "{context}" must have a value between 0 and 0xFFFF' + f'"char" type at "{context}" must have a value between 0 and 0x10FFFF inclusive' ) return CodePointType(char) unknown_type_error(ref, context, is_element) @@ -291,6 +419,9 @@ def check_type_ref(schema: Schema, ref: str, context: str, is_element: bool = Tr def check_attr_type_ref(schema: Schema, ref: str, context: str) -> AttributeType: + """Get the schema type that represent the type named by "ref" and raise an + exception if it's not usable in an XML attribute""" + r = check_type_ref(schema, ref, context, False) if isinstance(r, AttributeType): return r @@ -299,6 +430,8 @@ def check_attr_type_ref(schema: Schema, ref: str, context: str) -> AttributeType def check_py_name(name: str) -> None: + """Raise ValueError if "name" is not suitable as a Python field name""" + if (not name.isidentifier()) or keyword.iskeyword(name): raise ValueError(f'"{name}" is not a valid Python identifier') if name == "_children": @@ -306,8 +439,8 @@ def check_py_name(name: str) -> None: def resolve_refs(schema: Schema) -> tuple[list[str], list[str]]: - """Check that all referenced types exist and return the lists of all - element names and attribute names""" + """Replace all type reference names with actual types and return the lists + of all element names and attribute names""" elements: set[str] = set() attributes: set[str] = set() @@ -384,6 +517,9 @@ def generate_hash(items: list[str]) -> HashData: def collect_field_names( all_fields: set[str], cur_fields: set[str], refs: Iterable[Attribute | TypeRef], type_name: str ) -> None: + """Gather all field names into "all_fields" and make sure they are unique in + "cur_fields" """ + for ref in refs: all_fields.add(ref.py_name) if ref.py_name in cur_fields: diff --git a/xml_parser_generator/module_template.c.in b/xml_parser_generator/module_template.c.in index b1810ebe..fc777996 100644 --- a/xml_parser_generator/module_template.c.in +++ b/xml_parser_generator/module_template.c.in @@ -1545,8 +1545,8 @@ static int node_class_child__{$ type $}(parse_state *state,{$ 'PyObject **fields if(n->base.size && PyUnicode_CheckExact(n->base.content[n->base.size-1])) { dest = &n->base.content[n->base.size-1]; } else { - dest = frozen_list_push_tagged_value(state->py,TAGGED_UNION_NAME__{$ cname $},&n->base,NODE_LIST_INITIAL_CAPACITY); - if(dest == NULL) return -1; + if(frozen_list_push_object(&n->base,NULL,NODE_LIST_INITIAL_CAPACITY)) return -1; + dest = &n->base.content[n->base.size-1]; } //% else PyObject **dest = frozen_list_push_tagged_value(state->py,TAGGED_UNION_NAME__{$ cname $},&n->base,NODE_LIST_INITIAL_CAPACITY); From becb3a5f1823f0a6635c67a2f91a83e6add0aafb Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Thu, 28 Dec 2023 02:10:34 -0500 Subject: [PATCH 54/65] Tweaked setup options and added internal documentation --- MANIFEST.in | 6 +- pyproject.toml | 3 - setup.cfg | 7 + xml_parser_generator/make_parser.py | 23 ++- xml_parser_generator/module_template.c.in | 177 +++++++++++++++++++++- 5 files changed, 199 insertions(+), 17 
deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 5e971bb6..db7a32bc 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,8 +2,4 @@ recursive-include xml_parser_generator *.py *.in *.json include requirements/*.txt tests/*.py graft tests/data exclude breathe/_parser.pyi -prune scripts -prune examples -prune documentation -prune requirements -global-exclude *.py[cod] *.so *~ *.gitignore \ No newline at end of file +global-exclude *.py[cod] \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 96c2a160..22592490 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,9 +38,6 @@ homepage = "https://github.com/michaeljones/breathe" [project.scripts] breathe-apidoc = "breathe.apidoc:main" -[tool.setuptools.packages.find] -include = ["breathe*"] - [tool.black] line-length = 100 extend-exclude = ''' diff --git a/setup.cfg b/setup.cfg index d64f4357..a7013d04 100644 --- a/setup.cfg +++ b/setup.cfg @@ -7,3 +7,10 @@ per-file-ignores = [bdist_wheel] py-limited-api = cp38 universal = 0 + +[options] +packages = find: + +[options.packages.find] +include = + breathe* \ No newline at end of file diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index 2a3a687e..652addeb 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -1,3 +1,6 @@ +"""Parse a JSON schema file and generate the C code for a Python module to parse +XML""" + from __future__ import annotations import re @@ -134,7 +137,8 @@ class Attribute: optional: bool """Whether the attribute may be omitted. - Fields corresponding to omitted attributes are set to None""" + Fields corresponding to omitted attributes are set to None. + """ def py_type(self, as_param=False) -> str: """Get the Python type annotation describing the type of this attribute. @@ -196,7 +200,12 @@ class BuiltinType(SchemaType): @dataclasses.dataclass() -class SpType(BuiltinType): +class AddsToStringType(BuiltinType): + pass + + +@dataclasses.dataclass() +class SpType(AddsToStringType): """This element represents an arbitrary character whose code point is given in the attribute "value". 
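A small sketch of what CodePointType.extra_args (introduced earlier in this series) contributes when the template expands a #char child. The surrounding call text is generated by module_template.c.in, so the argument names here are only illustrative:

    char_value = 0x20AC                               # "euro" in the schema
    extra_args = f",{char_value:#x}"                  # what extra_args returns
    call = f"node_start_const_char(state,dest,attr{extra_args})"
    assert call == "node_start_const_char(state,dest,attr,0x20ac)"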
@@ -205,7 +214,7 @@ class SpType(BuiltinType): @dataclasses.dataclass() -class CodePointType(BuiltinType): +class CodePointType(AddsToStringType): """This element represents a specific character.""" char: int @@ -327,7 +336,7 @@ def py_union_list(self) -> list[str]: needs_str = False for name, t in self.content.items(): assert isinstance(t, SchemaType) - if not isinstance(t, (SpType, CodePointType)): + if not isinstance(t, AddsToStringType): by_type[t.py_name].append(name) else: needs_str = True @@ -559,7 +568,9 @@ def field_count(t) -> int: if t.used_directly: list_element_field_counts.add(field_count(t)) if t.content_type == ContentType.union: - tag_names.update(t.content) + tag_names.update( + name for name, t in t.content.items() if not isinstance(t, AddsToStringType) + ) elif t.content_type == ContentType.tuple: tuple_field_counts.add(len(t.content)) tagonly_and_tuple_field_counts.add(len(t.content)) @@ -689,7 +700,7 @@ def __call__(self): "builtin_t": (lambda x: isinstance(x, BuiltinType)), "enumeration_t": (lambda x: isinstance(x, SchemaEnum)), "char_enum_t": (lambda x: isinstance(x, SchemaCharEnum)), - "appends_str": (lambda x: isinstance(x, (SpType, CodePointType))), + "appends_str": (lambda x: isinstance(x, AddsToStringType)), "used_directly": used_directly, "allow_text": allow_text, "has_attributes": has_attributes, diff --git a/xml_parser_generator/module_template.c.in b/xml_parser_generator/module_template.c.in index fc777996..f5ff4e1e 100644 --- a/xml_parser_generator/module_template.c.in +++ b/xml_parser_generator/module_template.c.in @@ -1,3 +1,132 @@ +/* +Python module to parse Doxygen's XML output. + +This module defines the following types: + +* FrozenList -- represented in C with frozen_list. + + FrozenList is an array type that can be modified and resized while parsing + but is immutable when exposed in Python. + +* FrozenListItr -- represented in C with frozen_list_itr. + + FrozenListItr is the iterator for FrozenList. + +* TaggedValue -- represented in C with tagged_value. + + TaggedValue is a name/value pair for tagged union values. + +* Node -- no corresponding C type. + + Node is an empty class used as a base type for all classes that start with + "Node_". It has no use in C code but breathe/parser.py adds a __repr__ + method to it that is defined in Python. + +* Node_X not derived from FrozenList -- always represented by + node_tagonly_common. + + These classes are generated according the input schema. + +* Node_X derived from FrozenList -- always represented by node_list_common. + + These classes are generated according the input schema. + +* ParseError -- no corresponding C type. + + The exception raised when there is a problem with the XML input that cannot + be ignored. + +* ParseWarning -- no corresponding C type. + + The warning class for possible problems in the XML input. Currently this + is only issued for unexpected elements and attributes. + + +Each Node_X type has some or all of the following functions: + +* int node_class_child_start__ELEMENT(parse_state *state,const XML_Char *child_name,const XML_Char **attr) + + The element start handler for a child element of ELEMENT. + + This mostly delegates to node_class_child__ELEMENT. This returns 1 if it was + handled, 0 if not and -1 if a Python exception has been raised. + +* int node_class_finish__ELEMENT(parse_state *state) + + The element end handler for a child element of ELEMENT. + + This mostly delegates to node_class_finish_fields__ELEMENT. 
This returns 1 + if it was handled, 0 if not and -1 if a Python exception has been raised. + +* int node_class_start__ELEMENT(parse_state *state,PyObject **dest,const XML_Char **attr) + + This has three responsibilities: + - To create the object corresponding to ELEMENT and set *dest to the + reference. + - To add the new object and the appropriate XML event handlers to the top of + the parser stack. + - To handle the XML attributes. + + This returns 1 on success and -1 if a Python exception is raised. 1 is + returned instead of 0 because these functions are always called from + functions that return 1 if an element was handled. + +* void node_class_new_set_fields__ELEMENT(PyObject **fields,PyObject *args,Py_ssize_t start_i) + + Set the fields of ELEMENT from the positional arguments in "args". + + "start_i" is the index of what should be considered the first argument. This + function always succeeds. This is a separate function so that derived + elements can use it. + +* int node_class_new_set_kw_field__ELEMENT(module_state *state,PyObject **fields,py_field_type field,PyObject *value) + + Set the field in ELEMENT corresponding to the keyword argument "field". + + This returns 1 if the argument was assigned to a field, 0 if it doesn't + correspond to a field of ELEMENT and -1 if a Python exception was raised. + This is a separate function so that derived elements can use it. + +* int node_class_new_fields_end__ELEMENT(module_state *state,PyObject **fields) + + This is called after all arguments are processed in a Python constructor + (the __new__ method). It is used to check for unset fields and to fill them + with default values or raise a Python exception. This returns 0 on success + and -1 if a Python exception is raised. This is a separate function so that + derived elements can use it. + +* int node_class_attr__ELEMENT(parse_state *state,[PyObject **fields,]attribute_type,const XML_Char**) + + Handle a single attribute for ELEMENT. + + This returns 1 if it was handled, 0 if not and -1 if a Python exception has + been raised. This is a separate function so that derived elements can use + it. + +* int node_class_attr_end__ELEMENT(parse_state *state,PyObject **fields) + + This is called after all attributes are handled. It is used to check for + unset fields and to fill them with default values or raise a Python + exception. This returns 0 on success and -1 if a Python exception is raised. + This is a separate function so that derived elements can use it. + +* int node_class_child__ELEMENT(parse_state *state,[PyObject**,]element_type,const XML_Char**) + + Handle a single child element for ELEMENT. + + This returns 1 if it was handled, 0 if not and -1 if a Python exception has + been raised. This is a separate function so that derived elements can use + it. + +* int node_class_finish_fields__ELEMENT(parse_state *state,PyObject **fields) + + This is called after all child elements are handled. It is used to check for + unset fields and to fill them with default values or raise a Python + exception. This returns 0 on success and -1 if a Python exception is raised. + This is a separate function so that derived elements can use it. 
+ +*/ + #define PY_SSIZE_T_CLEAN #include @@ -71,8 +200,8 @@ enum { CLASS_COUNT }; +/* it's "ENUM_VALUE" because it refers to a Python enum value */ //% for type in types|select('enumeration_t') -/* it's ENUM_VALUE because it refers to a Python enum value */ enum { //% for value in type.children ENUM_VALUE__{$ type $}__{$ value.id $}{$ ' = 0' if loop.first $}, @@ -124,12 +253,34 @@ typedef int (*finish_callback)(struct _parse_state*); typedef int (*text_callback)(struct _parse_state*,const XML_Char*,int); typedef struct _parse_callbacks { + /* A pointer to the value corresponding the currently visited XML element. + + The callbacks below are free to set this to a different Python reference. + */ PyObject **value; + + /* A callback for a child of the current element. + + This may be NULL if no child elements are allowed. + */ child_start_callback cs_call; + + /* A callback for text contained directly inside the current element. + + This may be NULL if text is not allowed. If NULL, whitespace is ignored. + */ text_callback t_call; + + /* A callback for when the current element is closed. + + This may be NULL if no action is needed. */ finish_callback f_call; } parse_callbacks; +/* A "block" or the parser callbacks stack. + +The stack is broken into fixed sized "blocks" kept in a double-linked list. +*/ typedef struct _callback_stack_block { struct _callback_stack_block *prev; struct _callback_stack_block *next; @@ -137,10 +288,31 @@ typedef struct _callback_stack_block { } callback_stack_block; typedef struct _parse_state { + /* The block that contains the top stack item. + + There may be blocks before but also blocks after. Blocks are not freed until + parsing is done. All the blocks have to be accessed through this pointer as + it is the only pointer to a block that is not inside another block. + */ callback_stack_block *block; + + /* The number of stack items inside the current block. + + All previous blocks are full and all subsequent blocks are empty. + */ unsigned int block_used; + XML_Parser parser; + + /* White this is greater than zero all XML content is ignored. + + This starts at zero. When an unexpected element start is encountered, a + warning is issued (via PyErr_WarnFormat) and this is set to 1. Any + subsequent element-starts increment this and element-ends decrement this + until this is zero again, and normal parsing resumes. 
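The block-based callback stack described in the comments above can be pictured with a small Python toy model. This is only an illustration of the scheme, not the C implementation; the real block size is a fixed C constant, and the 4 used here is arbitrary:

    BLOCK_SIZE = 4   # the C code uses a larger fixed size

    class Block:
        def __init__(self, prev=None):
            self.prev = prev
            self.next = None
            self.items = [None] * BLOCK_SIZE

    class CallbackStack:
        def __init__(self):
            self.block = Block()   # block containing the top item
            self.used = 0          # items used in the current block

        def push(self, item):
            if self.used == BLOCK_SIZE:
                # full blocks are kept and reused rather than freed
                if self.block.next is None:
                    self.block.next = Block(self.block)
                self.block = self.block.next
                self.used = 0
            self.block.items[self.used] = item
            self.used += 1

        def pop(self):
            if self.used == 0:     # current block empty: step back one block
                self.block = self.block.prev
                self.used = BLOCK_SIZE
            self.used -= 1
            return self.block.items[self.used]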
+ */ int ignore_level; + module_state *py; } parse_state; @@ -260,13 +432,12 @@ static void XMLCALL end_element(void *user,const XML_Char *Py_UNUSED(name)) { int non_whitespace(const char *s,int len) { int i; for(i=0; i= 0; From f70f22e061a907b7de412df8dada3fe331909ec6 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Mon, 8 Jan 2024 05:46:00 -0500 Subject: [PATCH 55/65] Added ability to run tests with "cached" Doxygen output --- breathe/process.py | 23 ++-- pyproject.toml | 9 +- scripts/doxygen_cache.py | 71 +++++++++++ tests/data/examples/doxyfile_template | 17 +++ .../data/examples/test_dot_graphs/compare.xml | 3 +- tests/test_examples.py | 85 ++++++++----- xml_parser_generator/make_parser.py | 1 + xml_parser_generator/module_template.c.in | 115 ++++++++++-------- 8 files changed, 226 insertions(+), 98 deletions(-) create mode 100644 scripts/doxygen_cache.py create mode 100644 tests/data/examples/doxyfile_template diff --git a/breathe/process.py b/breathe/process.py index 66a19b50..329541a2 100644 --- a/breathe/process.py +++ b/breathe/process.py @@ -1,8 +1,13 @@ +from __future__ import annotations + from breathe.project import AutoProjectInfo, ProjectInfoFactory import os from shlex import quote -from typing import Callable, Dict, List, Tuple +from typing import Callable, TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Mapping AUTOCFG_TEMPLATE = r""" @@ -29,7 +34,7 @@ class ProjectData: """Simple handler for the files and project_info for each project.""" - def __init__(self, auto_project_info: AutoProjectInfo, files: List[str]) -> None: + def __init__(self, auto_project_info: AutoProjectInfo, files: list[str]) -> None: self.auto_project_info = auto_project_info self.files = files @@ -47,11 +52,11 @@ def __init__( def generate_xml( self, - projects_source: Dict[str, Tuple[str, List[str]]], - doxygen_options: Dict[str, str], - doxygen_aliases: Dict[str, str], + projects_source: Mapping[str, tuple[str, list[str]]], + doxygen_options: Mapping[str, str], + doxygen_aliases: Mapping[str, str], ) -> None: - project_files: Dict[str, ProjectData] = {} + project_files: Mapping[str, ProjectData] = {} # First collect together all the files which need to be doxygen processed for each project for project_name, file_structure in projects_source.items(): @@ -73,9 +78,9 @@ def generate_xml( def process( self, auto_project_info: AutoProjectInfo, - files: List[str], - doxygen_options: Dict[str, str], - doxygen_aliases: Dict[str, str], + files: list[str], + doxygen_options: Mapping[str, str], + doxygen_aliases: Mapping[str, str], ) -> str: name = auto_project_info.name() full_paths = [auto_project_info.abs_path_to_source_file(f) for f in files] diff --git a/pyproject.toml b/pyproject.toml index 22592490..42f05e49 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,9 +44,6 @@ extend-exclude = ''' ^/examples/.* | ^/tests/data/.* ''' -#[tool.cibuildwheel] -#test-requires = "pytest" -#test-command = "pytest {project}/tests" - -#[tool.cibuildwheel.linux] -#before-all = "yum install -y doxygen" +[tool.cibuildwheel] +test-requires = "pytest" +test-command = "BREATHE_DOXYGEN_TEST_CACHE={project}/tests/data/examples/_cache pytest {project}/tests" diff --git a/scripts/doxygen_cache.py b/scripts/doxygen_cache.py new file mode 100644 index 00000000..96b27407 --- /dev/null +++ b/scripts/doxygen_cache.py @@ -0,0 +1,71 @@ +"""Run Doxygen on all test samples and save the results. 
+ +This allows running the tests in multiple Docker containers with different +architectures, without requiring each of them to download and build a specific +version of Doxygen. +""" + +import os +import pathlib +import shutil +import subprocess + +from breathe.process import AUTOCFG_TEMPLATE + + +PROJECT_DIR = pathlib.Path(__file__).parent.parent +DATA_DIR = PROJECT_DIR / "tests" / "data" +EXAMPLES_DIR = DATA_DIR / "examples" +CACHE_DIR = EXAMPLES_DIR / "_cache" + + +def make_cache(): + template = (EXAMPLES_DIR / "doxyfile_template").read_text() + + exc = shutil.which("doxygen") + if exc is None: + raise ValueError("cannot find doxygen executable") + + CACHE_DIR.mkdir(exist_ok=True) + prev_dir = os.getcwd() + + r = subprocess.run( + [exc, "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True + ) + (CACHE_DIR / "version.txt").write_text(r.stdout) + + try: + for p in EXAMPLES_DIR.glob("test_*"): + print(f"generating output for {p.name}") + os.chdir(p) + out_dir = CACHE_DIR / p.name + out_dir.mkdir(exist_ok=True) + doxyfile = out_dir / "Doxyfile" + doxycontent = template.format(output=out_dir) + extra_opts = pathlib.Path("extra_dox_opts.txt") + if extra_opts.exists(): + doxycontent += extra_opts.read_text() + doxyfile.write_text(doxycontent) + + subprocess.run([exc, doxyfile], check=True) + + print(f"generating output for auto") + os.chdir(DATA_DIR / "auto") + out_dir = CACHE_DIR / "auto" + out_dir.mkdir(exist_ok=True) + + doxyfile = out_dir / "Doxyfile" + doxyfile.write_text(AUTOCFG_TEMPLATE.format( + project_name="example", + output_dir=str(out_dir), + input='"auto_class.h" "auto_function.h"', + extra="" + )) + + subprocess.run([exc, doxyfile], check=True) + finally: + os.chdir(prev_dir) + + +if __name__ == "__main__": + make_cache() \ No newline at end of file diff --git a/tests/data/examples/doxyfile_template b/tests/data/examples/doxyfile_template new file mode 100644 index 00000000..179004cc --- /dev/null +++ b/tests/data/examples/doxyfile_template @@ -0,0 +1,17 @@ +PROJECT_NAME = "example" +HAVE_DOT = YES +DOTFILE_DIRS = "." +GENERATE_LATEX = NO +GENERATE_MAN = NO +GENERATE_RTF = NO +CASE_SENSE_NAMES = NO +OUTPUT_DIRECTORY = "{output}" +IMAGE_PATH = "." +QUIET = YES +JAVADOC_AUTOBRIEF = YES +GENERATE_HTML = NO +GENERATE_XML = YES +WARN_IF_UNDOCUMENTED = NO +ALIASES = "rst=\verbatim embed:rst" +ALIASES += "endrst=\endverbatim" +ALIASES += "inlinerst=\verbatim embed:rst:inline" diff --git a/tests/data/examples/test_dot_graphs/compare.xml b/tests/data/examples/test_dot_graphs/compare.xml index c5c93871..c77ef28c 100644 --- a/tests/data/examples/test_dot_graphs/compare.xml +++ b/tests/data/examples/test_dot_graphs/compare.xml @@ -11,7 +11,8 @@
Using @dotfile command - + +
Captions go here
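For context on how the cache produced by scripts/doxygen_cache.py above is consumed, the test changes below read the BREATHE_DOXYGEN_TEST_CACHE environment variable. A condensed sketch of that lookup, with names taken from tests/test_examples.py:

    import os
    import pathlib

    dc = os.environ.get("BREATHE_DOXYGEN_TEST_CACHE")
    cache_dir = pathlib.Path(dc).absolute() if dc else None

    if cache_dir is not None:
        # version.txt is written by scripts/doxygen_cache.py
        doxygen_version = (cache_dir / "version.txt").read_text().split()[0]
        print(f"using cached Doxygen {doxygen_version} output from {cache_dir}")
    else:
        print("no cache configured; doxygen is run for each example")

Typical usage is to run scripts/doxygen_cache.py once and then point the variable at tests/data/examples/_cache when invoking pytest, as the cibuildwheel test-command above does.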
diff --git a/tests/test_examples.py b/tests/test_examples.py index 3bda5cb2..9cc1f0a5 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -1,5 +1,6 @@ from __future__ import annotations +import os from xml.parsers import expat import pytest import pathlib @@ -9,6 +10,8 @@ import dataclasses import sphinx +from breathe.process import AutoDoxygenProcessHandle + from typing import Any sphinx_path: Any @@ -19,25 +22,6 @@ sphinx_path = pathlib.Path -DOXYFILE_TEMPLATE = """ -PROJECT_NAME = "example" -HAVE_DOT = YES -GENERATE_LATEX = NO -GENERATE_MAN = NO -GENERATE_RTF = NO -CASE_SENSE_NAMES = NO -OUTPUT_DIRECTORY = "{output}" -IMAGE_PATH = "." -QUIET = YES -JAVADOC_AUTOBRIEF = YES -GENERATE_HTML = NO -GENERATE_XML = YES -WARN_IF_UNDOCUMENTED = NO -ALIASES = "rst=\\verbatim embed:rst" -ALIASES += "endrst=\\endverbatim" -ALIASES += "inlinerst=\\verbatim embed:rst:inline" -""" - C_FILE_SUFFIXES = frozenset((".h", ".c", ".hpp", ".cpp")) IGNORED_ELEMENTS: frozenset[str] = frozenset(()) @@ -52,6 +36,8 @@ "extensions": ["breathe", "sphinx.ext.graphviz"], } +DOXYGEN_CACHE_KEY = "BREATHE_DOXYGEN_TEST_CACHE" + class XMLEventType(enum.Enum): E_START = enum.auto() @@ -189,6 +175,11 @@ class VersionedFile: version: tuple[int, ...] +@dataclasses.dataclass +class DoxygenExe(VersionedFile): + template: str + + def str_to_version(v_str): return tuple(map(int, v_str.strip().split("."))) @@ -241,23 +232,44 @@ def compare_xml(generated, version): @pytest.fixture(scope="module") -def doxygen(): - exc = shutil.which("doxygen") - if exc is None: - raise ValueError("cannot find doxygen executable") +def doxygen_cache(): + dc = os.environ.get(DOXYGEN_CACHE_KEY) + if not dc: + return None + return pathlib.Path(dc).absolute() + - r = subprocess.run( - [exc, "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True +@pytest.fixture(scope="module") +def doxygen(doxygen_cache): + if doxygen_cache is None: + exc = shutil.which("doxygen") + if not exc: + raise ValueError("cannot find doxygen executable") + + v_str = subprocess.run( + [exc, "--version"], + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + ).stdout + else: + exc = "" + v_str = (doxygen_cache / "version.txt").read_text() + + return DoxygenExe( + exc, + str_to_version(v_str.split()[0]), + (TEST_DATA_DIR / "examples" / "doxyfile_template").read_text(), ) - return VersionedFile(exc, str_to_version(r.stdout.split()[0])) @pytest.mark.parametrize("test_input", get_individual_tests()) -def test_example(make_app, tmp_path, test_input, monkeypatch, doxygen): +def test_example(make_app, tmp_path, test_input, monkeypatch, doxygen, doxygen_cache): monkeypatch.chdir(test_input) doxyfile = tmp_path / "Doxyfile" - doxycontent = DOXYFILE_TEMPLATE.format(output=tmp_path) + doxycontent = doxygen.template.format(output=tmp_path) extra_opts = pathlib.Path("extra_dox_opts.txt") if extra_opts.exists(): doxycontent += extra_opts.read_text() @@ -265,7 +277,15 @@ def test_example(make_app, tmp_path, test_input, monkeypatch, doxygen): (tmp_path / "conf.py").touch() shutil.copyfile("input.rst", tmp_path / "index.rst") - subprocess.run([doxygen.file, doxyfile], check=True) + if doxygen_cache is not None: + # instead of passing a different path to breathe_projects.example, the + # folder is copied to the same place it would be without caching so that + # all paths in the generated output remain the same + shutil.copytree( + doxygen_cache / test_input.name / "xml", + tmp_path / "xml") + else: + 
subprocess.run([doxygen.file, doxyfile], check=True) make_app( buildername="xml", @@ -276,9 +296,14 @@ def test_example(make_app, tmp_path, test_input, monkeypatch, doxygen): compare_xml(tmp_path / "_build" / "xml" / "index.xml", doxygen.version) -def test_auto(make_app, tmp_path, monkeypatch, doxygen): +def test_auto(make_app, tmp_path, monkeypatch, doxygen, doxygen_cache): test_input = TEST_DATA_DIR / "auto" monkeypatch.chdir(test_input) + + if doxygen_cache is not None: + xml_path = str(doxygen_cache / "auto" / "xml") + monkeypatch.setattr(AutoDoxygenProcessHandle, "process", (lambda *args, **kwds: xml_path)) + (tmp_path / "conf.py").touch() shutil.copyfile("input.rst", tmp_path / "index.rst") diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index 652addeb..7ffbd350 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -688,6 +688,7 @@ def __call__(self): for t in schema.types.values(): if isinstance(t, ElementType) and any(field_count(cast(ElementType, b)) for b in t.bases): + # the code was written to support this but it has never been tested raise ValueError( 'elements having bases that have "attributes" or "children" are not currently supported' ) diff --git a/xml_parser_generator/module_template.c.in b/xml_parser_generator/module_template.c.in index f5ff4e1e..b84c9ece 100644 --- a/xml_parser_generator/module_template.c.in +++ b/xml_parser_generator/module_template.c.in @@ -3,91 +3,71 @@ Python module to parse Doxygen's XML output. This module defines the following types: -* FrozenList -- represented in C with frozen_list. +- FrozenList -- represented in C with frozen_list. FrozenList is an array type that can be modified and resized while parsing but is immutable when exposed in Python. -* FrozenListItr -- represented in C with frozen_list_itr. +- FrozenListItr -- represented in C with frozen_list_itr. FrozenListItr is the iterator for FrozenList. -* TaggedValue -- represented in C with tagged_value. +- TaggedValue -- represented in C with tagged_value. TaggedValue is a name/value pair for tagged union values. -* Node -- no corresponding C type. +- Node -- no corresponding C type. Node is an empty class used as a base type for all classes that start with "Node_". It has no use in C code but breathe/parser.py adds a __repr__ method to it that is defined in Python. -* Node_X not derived from FrozenList -- always represented by +- Node_X not derived from FrozenList -- always represented by node_tagonly_common. These classes are generated according the input schema. -* Node_X derived from FrozenList -- always represented by node_list_common. +- Node_X derived from FrozenList -- always represented by node_list_common. These classes are generated according the input schema. -* ParseError -- no corresponding C type. +- ListItem_X -- represented in C with "tuple_item". + + Types that have "kind" equal to "tuple_list_element" in the schema also have + a companion class for their elements. It will have the same name as the main + class except it starts with ListItem_ instead of Node_. These classes are + similar to named tuples. + +- ParseError -- no corresponding C type. The exception raised when there is a problem with the XML input that cannot be ignored. -* ParseWarning -- no corresponding C type. +- ParseWarning -- no corresponding C type. The warning class for possible problems in the XML input. Currently this is only issued for unexpected elements and attributes. 
-Each Node_X type has some or all of the following functions: - -* int node_class_child_start__ELEMENT(parse_state *state,const XML_Char *child_name,const XML_Char **attr) - - The element start handler for a child element of ELEMENT. - - This mostly delegates to node_class_child__ELEMENT. This returns 1 if it was - handled, 0 if not and -1 if a Python exception has been raised. - -* int node_class_finish__ELEMENT(parse_state *state) - - The element end handler for a child element of ELEMENT. - - This mostly delegates to node_class_finish_fields__ELEMENT. This returns 1 - if it was handled, 0 if not and -1 if a Python exception has been raised. - -* int node_class_start__ELEMENT(parse_state *state,PyObject **dest,const XML_Char **attr) - - This has three responsibilities: - - To create the object corresponding to ELEMENT and set *dest to the - reference. - - To add the new object and the appropriate XML event handlers to the top of - the parser stack. - - To handle the XML attributes. - - This returns 1 on success and -1 if a Python exception is raised. 1 is - returned instead of 0 because these functions are always called from - functions that return 1 if an element was handled. +Each non-simple type has some or all of the following functions: -* void node_class_new_set_fields__ELEMENT(PyObject **fields,PyObject *args,Py_ssize_t start_i) +- void node_class_new_set_fields__X(PyObject **fields,PyObject *args,Py_ssize_t start_i) - Set the fields of ELEMENT from the positional arguments in "args". + Set the fields of Node_X from the positional arguments in "args". "start_i" is the index of what should be considered the first argument. This function always succeeds. This is a separate function so that derived elements can use it. -* int node_class_new_set_kw_field__ELEMENT(module_state *state,PyObject **fields,py_field_type field,PyObject *value) +- int node_class_new_set_kw_field__X(module_state *state,PyObject **fields,py_field_type field,PyObject *value) - Set the field in ELEMENT corresponding to the keyword argument "field". + Set the field in Node_X corresponding to the keyword argument "field". This returns 1 if the argument was assigned to a field, 0 if it doesn't - correspond to a field of ELEMENT and -1 if a Python exception was raised. + correspond to a field of Node_X and -1 if a Python exception was raised. This is a separate function so that derived elements can use it. -* int node_class_new_fields_end__ELEMENT(module_state *state,PyObject **fields) +- int node_class_new_fields_end__X(module_state *state,PyObject **fields) This is called after all arguments are processed in a Python constructor (the __new__ method). It is used to check for unset fields and to fill them @@ -95,36 +75,67 @@ Each Node_X type has some or all of the following functions: and -1 if a Python exception is raised. This is a separate function so that derived elements can use it. -* int node_class_attr__ELEMENT(parse_state *state,[PyObject **fields,]attribute_type,const XML_Char**) +- int node_class_attr__X(parse_state *state,[PyObject **fields,]attribute_type,const XML_Char**) - Handle a single attribute for ELEMENT. + Handle a single attribute for element X. This returns 1 if it was handled, 0 if not and -1 if a Python exception has been raised. This is a separate function so that derived elements can use it. -* int node_class_attr_end__ELEMENT(parse_state *state,PyObject **fields) +- int node_class_attr_end__X(parse_state *state,PyObject **fields) This is called after all attributes are handled. 
It is used to check for unset fields and to fill them with default values or raise a Python exception. This returns 0 on success and -1 if a Python exception is raised. This is a separate function so that derived elements can use it. -* int node_class_child__ELEMENT(parse_state *state,[PyObject**,]element_type,const XML_Char**) +- int node_class_child__X(parse_state *state,[PyObject**,]element_type,const XML_Char**) - Handle a single child element for ELEMENT. + Handle a single child element for element X. This returns 1 if it was handled, 0 if not and -1 if a Python exception has been raised. This is a separate function so that derived elements can use it. -* int node_class_finish_fields__ELEMENT(parse_state *state,PyObject **fields) +- int node_class_finish_fields__X(parse_state *state,PyObject **fields) This is called after all child elements are handled. It is used to check for unset fields and to fill them with default values or raise a Python exception. This returns 0 on success and -1 if a Python exception is raised. This is a separate function so that derived elements can use it. + +If the type is used directly, it will have its own Python class and the +following C functions: + +- int node_class_child_start__X(parse_state *state,const XML_Char *child_name,const XML_Char **attr) + + The element start handler for a child element of element X. + + This mostly delegates to node_class_child__X. This returns 1 if it was + handled, 0 if not and -1 if a Python exception has been raised. + +- int node_class_finish__X(parse_state *state) + + The element end handler for a child element of element X. + + This mostly delegates to node_class_finish_fields__X. This returns 1 if it + was handled, 0 if not and -1 if a Python exception has been raised. + +- int node_class_start__X(parse_state *state,PyObject **dest,const XML_Char **attr) + + This has three responsibilities: + - To create the object corresponding to element X and set *dest to the + reference. + - To add the new object and the appropriate XML event handlers to the top of + the parser stack. + - To handle the XML attributes. + + This returns 1 on success and -1 if a Python exception is raised. 1 is + returned instead of 0 because these functions are always called from + functions that return 1 if an element was handled. + */ #define PY_SSIZE_T_CLEAN @@ -304,7 +315,7 @@ typedef struct _parse_state { XML_Parser parser; - /* White this is greater than zero all XML content is ignored. + /* While this is greater than zero all XML content is ignored. This starts at zero. When an unexpected element start is encountered, a warning is issued (via PyErr_WarnFormat) and this is set to 1. 
Any @@ -429,7 +440,7 @@ static void XMLCALL end_element(void *user,const XML_Char *Py_UNUSED(name)) { pop_callbacks(state); } -int non_whitespace(const char *s,int len) { +static int non_whitespace(const char *s,int len) { int i; for(i=0; i Date: Sat, 13 Jan 2024 01:55:44 -0500 Subject: [PATCH 56/65] Now generates pure-python parser in addition to compiled extension --- breathe/parser.py | 46 +- setup.py | 2 +- xml_parser_generator/CMakeLists.txt | 38 -- xml_parser_generator/make_parser.py | 83 ++- xml_parser_generator/module_template.c.in | 37 +- xml_parser_generator/module_template.py.in | 692 +++++++++++++++++++++ xml_parser_generator/setuptools_builder.py | 32 +- 7 files changed, 824 insertions(+), 106 deletions(-) delete mode 100644 xml_parser_generator/CMakeLists.txt create mode 100644 xml_parser_generator/module_template.py.in diff --git a/breathe/parser.py b/breathe/parser.py index ea412f4a..143a48d4 100644 --- a/breathe/parser.py +++ b/breathe/parser.py @@ -7,7 +7,6 @@ from breathe import file_state_cache, path_handler from breathe.project import ProjectInfo -from breathe._parser import * from sphinx.application import Sphinx @@ -19,36 +18,41 @@ T_inv = TypeVar("T_inv") -@reprlib.recursive_repr() -def node_repr(self: Node) -> str: # pragma: no cover - cls = type(self) - fields = [] - if isinstance(self, FrozenList): - pos = ", ".join(map(repr, self)) - fields.append(f"[{pos}]") - fields.extend(f"{field}={getattr(self,field)!r}" for field in cls._fields) - inner = ", ".join(fields) - return f"{cls.__name__}({inner})" +try: + from breathe._parser import * +except ImportError: + from breathe._parser_py import * +else: + @reprlib.recursive_repr() + def node_repr(self: Node) -> str: # pragma: no cover + cls = type(self) + fields = [] + if isinstance(self, FrozenList): + pos = ", ".join(map(repr, self)) + fields.append(f"[{pos}]") + fields.extend(f"{field}={getattr(self,field)!r}" for field in cls._fields) + inner = ", ".join(fields) + return f"{cls.__name__}({inner})" -Node.__repr__ = node_repr # type: ignore + Node.__repr__ = node_repr # type: ignore -@reprlib.recursive_repr() -def taggedvalue_repr(self: TaggedValue) -> str: # pragma: no cover - return f"{self.__class__.__name__}({self.name!r}, {self.value!r})" + @reprlib.recursive_repr() + def taggedvalue_repr(self: TaggedValue) -> str: # pragma: no cover + return f"{self.__class__.__name__}({self.name!r}, {self.value!r})" -TaggedValue.__repr__ = taggedvalue_repr # type: ignore + TaggedValue.__repr__ = taggedvalue_repr # type: ignore -@reprlib.recursive_repr() -def frozenlist_repr(self: FrozenList) -> str: # pragma: no cover - inner = ", ".join(map(repr, self)) - return f"{self.__class__.__name__}([{inner}])" + @reprlib.recursive_repr() + def frozenlist_repr(self: FrozenList) -> str: # pragma: no cover + inner = ", ".join(map(repr, self)) + return f"{self.__class__.__name__}([{inner}])" -FrozenList.__repr__ = frozenlist_repr # type: ignore + FrozenList.__repr__ = frozenlist_repr # type: ignore def description_has_content(node: Node_descriptionType | None) -> bool: diff --git a/setup.py b/setup.py index 5199ae9e..97d41cab 100644 --- a/setup.py +++ b/setup.py @@ -23,7 +23,7 @@ Extension( "_parser", [], # source is generated by CustomBuildExt - depends=CustomBuildExt.DEPENDENCIES, + depends=CustomBuildExt.M_DEPENDENCIES, libraries=["expat"], define_macros=[ ("PARSER_PY_LIMITED_API", "0x03080000"), # set Stable ABI version to 3.8 diff --git a/xml_parser_generator/CMakeLists.txt b/xml_parser_generator/CMakeLists.txt deleted file mode 
100644 index 1cf7158a..00000000 --- a/xml_parser_generator/CMakeLists.txt +++ /dev/null @@ -1,38 +0,0 @@ -# Note: CMake and this file are not neccessary to build and install Breathe. -# This exists to aid in development. - -cmake_minimum_required(VERSION 3.26) -project(doxyparse LANGUAGES C) - -find_package(Python3 REQUIRED COMPONENTS Interpreter Development.Module) -#find_package(Python3 REQUIRED COMPONENTS Interpreter Development.Module Development.SABIModule) -find_package(EXPAT) - -set(module_name parser) - -if (MSVC) - add_compile_options(/W4) -else() - add_compile_options(-Wall -Wextra -Werror=implicit-function-declaration) -endif() - -add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${module_name}.c ${CMAKE_CURRENT_SOURCE_DIR}/stubs_template.pyi - COMMAND Python3::Interpreter - ${CMAKE_CURRENT_SOURCE_DIR}/make_parser.py - ${CMAKE_CURRENT_SOURCE_DIR}/schema.json - ${CMAKE_CURRENT_SOURCE_DIR}/module_template.c - ${CMAKE_CURRENT_SOURCE_DIR}/stubs_template.pyi - ${CMAKE_CURRENT_BINARY_DIR}/${module_name}.c - ${CMAKE_CURRENT_BINARY_DIR}/${module_name}.pyi - DEPENDS - ${CMAKE_CURRENT_SOURCE_DIR}/make_parser.py - ${CMAKE_CURRENT_SOURCE_DIR}/schema.json - ${CMAKE_CURRENT_SOURCE_DIR}/module_template.c - ${CMAKE_CURRENT_SOURCE_DIR}/stubs_template.pyi - VERBATIM) - -Python3_add_library(${module_name} MODULE WITH_SOABI ${CMAKE_CURRENT_BINARY_DIR}/${module_name}.c) -#Python3_add_library(${module_name} MODULE USE_SABI 3.7 WITH_SOABI ${CMAKE_CURRENT_BINARY_DIR}/${module_name}.c) -target_link_libraries(${module_name} PRIVATE EXPAT::EXPAT) -target_compile_definitions(${module_name} PRIVATE MODULE_NAME=${module_name}) diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index 7ffbd350..64321015 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -60,7 +60,11 @@ class ContentType(enum.Enum): """Child elements are grouped into named tuple-like objects. Each batch of child elements must appear in order in the XML document. Text - content is not allowed.""" + content is not allowed. + + Currently, tuple child element names must be valid Python identifiers as + there isn't a way to have different field names. + """ union = enum.auto() """Each item is either a tagged union (an instance of TaggedValue) or a @@ -113,6 +117,17 @@ def py_type(self, as_param=False) -> str: return f"{self.type.py_name} | None" return self.type.py_name + def needs_finish(self) -> bool: + """Return True if the field value will need to be checked at the end of + parsing the element. + + This is the case case for all fields except list fields with no minimum. + For most fields, we need to know how many corresponding child elements + exist, which can't be known until the parent element is fully parsed, + but list fields without minimums accept any number of child elements. 
+ """ + return not self.is_list or self.min_items > 0 + @dataclasses.dataclass() class Attribute: @@ -176,6 +191,11 @@ def extra_args(self) -> str: the type's element start handler""" return "" + def add_sorted(self, dest: list[SchemaType], visited: set[int]) -> None: + if id(self) not in visited: + visited.add(id(self)) + dest.append(self) + if TYPE_CHECKING: @property @@ -279,6 +299,14 @@ def all_fields(self) -> Iterable[TypeRef | Attribute]: def py_name(self) -> str: return f"Node_{self.name}" + def add_sorted(self, dest: list[SchemaType], visited: set[int]) -> None: + if id(self) not in visited: + for b in self.bases: + assert isinstance(b, SchemaType) + b.add_sorted(dest, visited) + visited.add(id(self)) + dest.append(self) + @dataclasses.dataclass() class TagOnlyElement(ElementType): @@ -672,6 +700,25 @@ def optional(ref: TypeRef | Attribute) -> bool: return ref.is_list or ref.min_items == 0 return ref.optional + def array_field(ref) -> bool: + if isinstance(ref, TypeRef): + return ref.is_list + return False + + def needs_finish_fields_call(t): + if not isinstance(t, ElementType): + return False + return any(c.needs_finish() for c in t.children.values()) or any( + map(needs_finish_fields_call, t.bases) + ) + + def needs_finish_call(t): + return needs_finish_fields_call(t) or ( + isinstance(t, ListElement) + and t.content_type == ContentType.tuple + and len(t.content) > 1 + ) + def error(msg): raise TypeError(msg) @@ -686,7 +733,12 @@ def __call__(self): self.used = True return self.content + # types sorted topologically with regard to base elements + sorted_types: list[SchemaType] = [] + visited_types: set[SchemaType] = set() + for t in schema.types.values(): + t.add_sorted(sorted_types, visited_types) if isinstance(t, ElementType) and any(field_count(cast(ElementType, b)) for b in t.bases): # the code was written to support this but it has never been tested raise ValueError( @@ -702,6 +754,8 @@ def __call__(self): "enumeration_t": (lambda x: isinstance(x, SchemaEnum)), "char_enum_t": (lambda x: isinstance(x, SchemaCharEnum)), "appends_str": (lambda x: isinstance(x, AddsToStringType)), + "code_point_t": (lambda x: isinstance(x, CodePointType)), + "sp_t": (lambda x: isinstance(x, CodePointType)), "used_directly": used_directly, "allow_text": allow_text, "has_attributes": has_attributes, @@ -709,10 +763,13 @@ def __call__(self): "has_children_or_content": has_children_or_content, "has_fields": lambda x: field_count(x) > 0, "has_children_or_tuple_content": has_children_or_tuple_content, + "needs_finish_fields_call": needs_finish_fields_call, + "needs_finish_call": needs_finish_call, "content_bare": content_type(ContentType.bare), "content_tuple": content_type(ContentType.tuple), "content_union": content_type(ContentType.union), "optional": optional, + "array_field": array_field, } ) tmpl_env.filters.update( @@ -728,7 +785,7 @@ def __call__(self): ) tmpl_env.globals.update( { - "types": list(schema.types.values()), + "types": sorted_types, "root_elements": list(schema.roots.items()), "element_names": elements, "attribute_names": attributes, @@ -951,24 +1008,14 @@ def check_schema(x) -> Schema: return r -def generate_from_json( - json_path, c_template_file, pyi_template_file, c_output_file, pyi_output_file -) -> None: +def generate_from_json(json_path, template_files) -> None: with open(json_path, "rb") as ifile: schema = check_schema(json.load(ifile)) env = make_env(schema) - with open(c_template_file) as tfile: - template_str = tfile.read() - with open(c_output_file, "w") as ofile: - 
env.from_string(template_str).stream().dump(ofile) - - with open(pyi_template_file) as tfile: - template_str = tfile.read() - with open(pyi_output_file, "w") as ofile: - env.from_string(template_str).stream().dump(ofile) - - -if __name__ == "__main__": - generate_from_json(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]) + for i_file, o_file in template_files: + with open(i_file) as tfile: + template_str = tfile.read() + with open(o_file, "w") as ofile: + env.from_string(template_str).stream().dump(ofile) diff --git a/xml_parser_generator/module_template.c.in b/xml_parser_generator/module_template.c.in index b84c9ece..432aec09 100644 --- a/xml_parser_generator/module_template.c.in +++ b/xml_parser_generator/module_template.c.in @@ -109,6 +109,13 @@ Each non-simple type has some or all of the following functions: If the type is used directly, it will have its own Python class and the following C functions: +- Py_ssize_t assign_field_name_tuple__X(PyObject *dest,PyObject **names,Py_ssize_t start_i) + + Populate the tuple "dest" with the full list of field names of X. + + The return value is the number of fields set. This is compared to the number + of fields expected, in an assert statement. + - int node_class_child_start__X(parse_state *state,const XML_Char *child_name,const XML_Char **attr) The element start handler for a child element of element X. @@ -814,7 +821,7 @@ static PyObject *parse_error_tp_str(PyObject *self) { PyObject *args = parse_error_get_args(self); if(args == NULL) return NULL; lineno = PyTuple_GetItem(args,1); - if(lineno == Py_None) r = PyUnicode_FromFormat("Error: %S",lineno,PyTuple_GetItem(args,0)); + if(lineno == Py_None) r = PyUnicode_FromFormat("Error: %S",PyTuple_GetItem(args,0)); else r = PyUnicode_FromFormat("Error on line %S: %S",lineno,PyTuple_GetItem(args,0)); Py_DECREF(args); return r; @@ -1199,7 +1206,7 @@ static PyType_Slot node_class_slots__{$ type $}[] = { //% if type is has_children_or_content static int node_class_child_start__{$ type $}(parse_state*,const XML_Char*,const XML_Char**); //% endif -//% if type is has_children_or_tuple_content +//% if type is needs_finish_call static int node_class_finish__{$ type $}(parse_state*); //% endif static int node_class_start__{$ type $}(parse_state*,PyObject**,const XML_Char**); @@ -1217,7 +1224,7 @@ static int node_class_attr_end__{$ type $}(parse_state*,PyObject**); //% if type is has_children_or_content static int node_class_child__{$ type $}(parse_state*,{$ 'PyObject**,' if type is has_fields $}element_type,const XML_Char**); //% endif -//% if type is has_children +//% if type is needs_finish_fields_call static int node_class_finish_fields__{$ type $}(parse_state *state,PyObject **fields); //% endif //% elif type is enumeration_t or type is char_enum_t @@ -1270,6 +1277,7 @@ static PyObject *tuple_item_tp_new__{$ type $}(PyTypeObject *subtype,PyObject *a } static PyType_Slot tuple_item_slots__{$ type $}[] = { + {Py_tp_new,tuple_item_tp_new__{$ type $}}, {Py_tp_members,tuple_item_members__{$ type $}}, {Py_tp_dealloc,node_tagonly_common_dealloc_{$ type.content|length $}}, {Py_tp_traverse,node_tagonly_common_traverse_{$ type.content|length $}}, @@ -1605,7 +1613,7 @@ static int node_class_new_set_kw_field__{$ type $}(module_state *state,PyObject } static int node_class_new_fields_end__{$ type $}(module_state *state,PyObject **fields) { //% for b in type.bases if b|field_count - if(node_class_new_fields_end__{$ b $}(fields + BASE_FIELD_OFFSET__{$ type $}__{$ b $})) return -1; + 
if(node_class_new_fields_end__{$ b $}(state,fields + BASE_FIELD_OFFSET__{$ type $}__{$ b $})) return -1; //% endfor //% for ref in type|attributes if(fields[FIELD__{$ type $}__{$ ref.py_name $}] == NULL) { @@ -1656,7 +1664,7 @@ static int node_class_attr__{$ type $}(parse_state *state,{$ 'PyObject **fields, } static int node_class_attr_end__{$ type $}(parse_state *state, PyObject **fields) { //% for b in type.bases if b is has_attributes - if(node_class_attr_end__{$ b $}(state),fields + BASE_FIELD_OFFSET__{$ type $}__{$ b $}) return -1; + if(node_class_attr_end__{$ b $}(state,fields + BASE_FIELD_OFFSET__{$ type $}__{$ b $})) return -1; //% endfor //% for ref in type|attributes if(fields[FIELD__{$ type $}__{$ ref.py_name $}] == NULL) { @@ -1724,12 +1732,9 @@ static int node_class_child__{$ type $}(parse_state *state,{$ 'PyObject **fields { //% if ctype is appends_str PyObject **dest; - if(n->base.size && PyUnicode_CheckExact(n->base.content[n->base.size-1])) { - dest = &n->base.content[n->base.size-1]; - } else { - if(frozen_list_push_object(&n->base,NULL,NODE_LIST_INITIAL_CAPACITY)) return -1; - dest = &n->base.content[n->base.size-1]; - } + if((!n->base.size || !PyUnicode_CheckExact(n->base.content[n->base.size-1])) && + frozen_list_push_object(&n->base,NULL,NODE_LIST_INITIAL_CAPACITY)) return -1; + dest = &n->base.content[n->base.size-1]; //% else PyObject **dest = frozen_list_push_tagged_value(state->py,TAGGED_UNION_NAME__{$ cname $},&n->base,NODE_LIST_INITIAL_CAPACITY); if(dest == NULL) return -1; @@ -1861,7 +1866,7 @@ static int node_class_start__{$ type $}(parse_state *state,PyObject **dest,const //% else cb->cs_call = NULL; //% endif -//% if type is has_children_or_tuple_content +//% if type is needs_finish_call cb->f_call = node_class_finish__{$ type $}; //% else cb->f_call = NULL; @@ -1883,9 +1888,9 @@ static int node_class_child_start__{$ type $}(parse_state *state,const XML_Char return node_class_child__{$ type $}(state,{$ 'n->fields,' if type is has_fields $}element_lookup(child_name),attr); } //% endif -//% if type is has_children +//% if type is needs_finish_fields_call static int node_class_finish_fields__{$ type $}(parse_state *state,PyObject **fields) { -//% for b in type.bases|select('has_children') +//% for b in type.bases|select('needs_finish_fields_call') if(node_class_finish_fields__{$ b $}(state,fields+BASE_FIELD_OFFSET__{$ type $}__{$ b $})) return -1; //% endfor //% for ref in type|children @@ -1912,11 +1917,11 @@ static int node_class_finish_fields__{$ type $}(parse_state *state,PyObject **fi return 0; } //% endif -//% if type is has_children_or_tuple_content +//% if type is needs_finish_call static int node_class_finish__{$ type $}(parse_state *state) { assert(Py_TYPE(*top_callbacks(state)->value) == state->py->classes[CLASS__{$ type $}]); node_{$ common_affix(type) $}_common *n = (node_{$ common_affix(type) $}_common*)*top_callbacks(state)->value; -//% if type is has_children +//% if type is needs_finish_fields_call if(node_class_finish_fields__{$ type $}(state,n->fields)) return -1; //% endif //% if type is content_tuple and type.content|length > 1 diff --git a/xml_parser_generator/module_template.py.in b/xml_parser_generator/module_template.py.in new file mode 100644 index 00000000..008aec3a --- /dev/null +++ b/xml_parser_generator/module_template.py.in @@ -0,0 +1,692 @@ +""" +Python module to parse Doxygen's XML output. + +This module defines the following types: + +- TaggedValue + + TaggedValue is a name/value pair for tagged union values. 
+ +- Node + + Node is an empty class used as a base type for all classes that start with + "Node_". + +- Node_X + + These classes are generated according the input schema. + +- ListItem_X + + Types that have "kind" equal to "tuple_list_element" in the schema also have + a companion class for their elements. It will have the same name as the main + class except it starts with ListItem_ instead of Node_. These are named + tuples. + +- ParseError + + The exception raised when there is a problem with the XML input that cannot + be ignored. + +- ParseWarning + + The warning class for possible problems in the XML input. Currently this + is only issued for unexpected elements and attributes. + + +Each non-simple type has some or all of the following entities: + +- _node_class_attr__X + + Attribute handlers for element X. + + This is a mapping of attribute names to functions that handle the + attributes. + +- def _node_class_attr_end__X(state: _ParseState, obj) + + This is called after all attributes are handled. It is used to check for + unset fields and to fill them with default values or raise an exception. + This is a separate function so that derived elements can use it. + +- _node_class_child__X + + Element handlers for element X. + + This is a mapping of element names to functions that handle the elements. + +- def _node_class_finish_fields__X(state: _ParseState, obj) + + This is called after all child elements are handled. It is used to check for + unset fields and to fill them with default values or raise an exception. + This is a separate function so that derived elements can use it. + + +If the type is used directly, it will have its own class and the following +function: + +- def _node_class_start__X(state: _ParseState, setter: Callable, attr: Iterable[tuple[str, str]]): + + This has three responsibilities: + - To create the object corresponding to element X. + - To handle the XML attributes. + - To add the new object and the appropriate XML event handlers to the top of + the parser stack. + + This function doesn't return a value immediately, instead "setter" is called + with the value when it's ready. + +""" + +from __future__ import annotations + +import enum +import warnings +import functools +from collections.abc import Iterable, Sequence +from xml.parsers import expat +from typing import Any, Callable, Literal, NamedTuple, NoReturn, TYPE_CHECKING + +try: + from types import GenericAlias +except ImportError: + GenericAlias = lambda cls: cls + + +if TYPE_CHECKING: + ChildStartCallback = Callable[["_ParseState", Any, Iterable[tuple[str, str]]], None] + FinishCallback = Callable[["_ParseState"], None] + TextCallback = Callable[["_ParseState", str], None] + Setter = Callable[[Any], None] + + +_GLOBAL_type = type +_GLOBAL_list = list + +class _ParseCallbacks: + __slots__ = "value", "setter", "cs_call", "f_call", "t_call" + + value: Any + """The value corresponding the currently visited XML element.""" + + setter: Setter | None + """A callback given by the parent element to consume the value. + + This may be None if no action is needed. + """ + + cs_call: dict[str, ChildStartCallback] | None + """A mapping of element names to callbacks for a children of the current + element. + + This may be None if no child elements are allowed. + """ + + f_call: FinishCallback | None + """A callback for when the current element is closed. + + This may be None if no action is needed. + """ + + t_call: TextCallback | None + """A callback for text contained directly inside the current element. 
+ + This may be None if text is not allowed. If None, whitespace is ignored. + """ + + def __init__(self, value=None, setter=None, cs_call=None, f_call=None, t_call=None): + self.value = value + self.setter = setter + self.cs_call = cs_call + self.f_call = f_call + self.t_call = t_call + +class ParseError(RuntimeError): + @property + def message(self, /) -> str: + return self.args[0] + + @property + def lineno(self, /) -> int: + return self.args[1] + + def __str__(self, /) -> str: + if self.lineno is None: + return "Error: " + self.message + return f"Error on line {self.lineno}: {self.message}" + +class ParseWarning(UserWarning): + pass + + +class _ParseState: + def __init__(self, parser, /): + self.parser: expat.XMLParserType = parser + self.parse_callbacks: list[_ParseCallbacks] = [] + + # While this is greater than zero all XML content is ignored. + # + # This starts at zero. When an unexpected element start is encountered, + # a warning is issued (via PyErr_WarnFormat) and this is set to 1. Any + # subsequent element-starts increment this and element-ends decrement + # this until this is zero again, and normal parsing resumes. + self.ignore_level: int = 0 + + def start_element(self, name: str, attrs: dict[str, str], /) -> None: + if self.ignore_level: + self.ignore_level += 1 + return + + cb = self.parse_callbacks[-1] + + if cb.cs_call is not None: + handler = cb.cs_call.get(name) + if handler is not None: + handler(self, cb.value, attrs.items()) + return + + self.set_parse_warning(f'unexpected element "{name}"') + + self.ignore_level = 1 + + + def end_element(self, unused, /) -> None: + if self.ignore_level: + self.ignore_level -= 1 + return + + cb = self.parse_callbacks[-1] + + if cb.f_call is not None: + cb.f_call(self) + + if cb.setter is not None: + cb.setter(cb.value) + self.parse_callbacks.pop() + + def character_data(self, s: str, /) -> None: + if self.ignore_level: return + + cb = self.parse_callbacks[-1] + + if cb.t_call is not None: + cb.t_call(self, s) + elif s and not s.isspace(): + self.set_parse_warning("unexpected character data") + + def raise_parse_error(self, msg, /) -> NoReturn: + raise ParseError(msg, self.parser.CurrentLineNumber) + + def set_parse_warning(self, msg, /) -> None: + warnings.warn(ParseWarning(f'Warning on line {self.parser.CurrentLineNumber}: {msg}')) + + +class TaggedValue(NamedTuple): + name: str + value: Any + + __class_getitem__ = classmethod(GenericAlias) + + +class Node: + __slots__ = () + +class ListNode(list, Node): + __slots__ = () + + +def _node_list_common_text(state: _ParseState, data: str, /): + value = state.parse_callbacks[-1].value + + if value and type(value[-1]) is str: + value[-1] += data + else: + value.append(data) + + + +def _push_tuple_item( + state: _ParseState, + tuple_i: int, + tag_names: Sequence[str], + cls, + obj, + / +): + if tuple_i == 0: + if len(obj): + tuple_size = len(tag_names) + if len(obj[-1]) < tuple_size: + state.raise_parse_error( + f'"{tag_names[0]}" element can only come after "{tag_names[tuple_size-1]}" element or be the first in its group', + ) + + obj[-1] = cls._make(obj[-1]) + + # tuples are immutable so a list is used while collecting the values + new_tuple = [] + obj.append(new_tuple) + + return new_tuple.append + + + if not obj or len(obj[-1]) < tuple_i: + state.raise_parse_error( + f'"{tag_names[tuple_i]}" element can only come after "{tag_names[tuple_i-1]}" element' + ) + + return obj[-1].append + + +def _check_complete_tuple(state: _ParseState, tag_names: Sequence[str], cls, obj, /): + if obj: + 
last = obj[-1] + + if len(last) != len(tag_names): + state.raise_parse_error( + f'"{tag_names[len(last)]}" element must come after "{tag_names[len(last)-1]}" element' + ) + + obj[-1] = cls._make(last) + + +def _warn_unexpected_attribute(state: _ParseState, name: str, /): + state.set_parse_warning(f'unexpected attribute "{name}"') + +def _raise_missing_attribute_error(state: _ParseState, name: str, /): + state.raise_parse_error(f'missing "{name}" attribute') + +def _raise_duplicate_element_error(state: _ParseState, name: str, /): + state.raise_parse_error(f'"{name}" cannot appear more than once in this context') + +def _raise_missing_element_error(state: _ParseState, name: str, /): + state.raise_parse_error(f'missing "{name}" child') + +def _raise_empty_list_element_error(state: _ParseState, name: str, /): + state.raise_parse_error(f'at least one "{name}" child is required') + +def _raise_invalid_int_error(state: _ParseState, value: str, /): + state.raise_parse_error(f'"{value}" is not a valid integer') + +def _raise_invalid_enum_error(state: _ParseState, value: str, /): + state.raise_parse_error(f'"{value}" is not one of the allowed enumeration values') + +def _raise_invalid_char_enum_error(state: _ParseState, c: str, allowed: str, /): + state.raise_parse_error(f'"{c}" is not one of the allowed character values; must be one of "{allowed}"') + + +def _parse_DoxBool_attribute(state: _ParseState, name: str, value: str, /) -> bool: + if value == "yes": + return True + if value == "no": + return False + + state.raise_parse_error(f'"{name}" must be "yes" or "no"') + +def _node_string_text(state: _ParseState, data: str) -> None: + state.parse_callbacks[-1].value += data + +def _node_start_string(state: _ParseState, setter: Setter, attr: Iterable[tuple[str, str]], /): + for name, _ in attr: + _warn_unexpected_attribute(state, name) + + state.parse_callbacks.append(_ParseCallbacks('', setter, None, None, _node_string_text)) + + +def _node_start_empty(state: _ParseState, setter: Setter, attr: Iterable[tuple[str, str]], /): + for name, _ in attr: + _warn_unexpected_attribute(state, name) + + setter(None) + state.parse_callbacks.append(_ParseCallbacks()) + + +def _node_start_spType(state: _ParseState, attr: Iterable[tuple[str, str]], /): + c = ' ' + + for name, value in attr: + if name != "value": + _warn_unexpected_attribute(state, name) + + try: + c_i = int(value, 10) + except ValueError: + state.raise_parse_error('"value" must be a valid integer') + if 0 > c_i > 127: + state.raise_parse_error('"value" must be between 0 and 127') + + c = chr(c_i) + + state.parse_callbacks.append(_ParseCallbacks()) + return c + +def _node_start_const_char(state: _ParseState, attr: Iterable[tuple[str, str]], /): + for name, _ in attr: + _warn_unexpected_attribute(state, name) + + state.parse_callbacks.append(_ParseCallbacks()) + +def _union_codepoint_element(c): + def inner(state: _ParseState, obj, attr: Iterable[tuple[str, str]], /) -> None: + if obj and type(obj[-1]) is str: + obj[-1] += c + else: + obj.append(c) + + _node_start_const_char(state, attr) + return inner + +def _add_to_list(name): + def inner(f): + global _cur_list + _cur_list[name] = f + return inner + +//% for type in types +//% if type is element +//% if type is has_attributes +_node_class_attr__{$ type $} = _cur_list = {} +//% for b in type.bases|select('has_attributes') +_node_class_attr__{$ type $}.update(node_class_attr__{$ b $}) +//% endfor + +//% for attr in type|attributes +@_add_to_list("{$ attr.name $}") +def _a__{$ type $}__{$ attr.name 
$}(state: _ParseState, obj, value: str, /): +//% if attr.type is builtin_t +//% if attr.type.name == "string" + obj.{$ attr.py_name $} = value +//% elif attr.type.name == "integer" + try: + obj.{$ attr.py_name $} = int(value, 10) + except ValueError: + _raise_invalid_int_error(state, value) +//% else + obj.{$ attr.py_name $} = _parse_{$ attr.type $}_attribute(state, "{$ attr.name $}", value) +//% endif +//% elif attr.type is enumeration_t + try: + obj.{$ attr.py_name $} = {$ attr.type $}(value.strip()) + except ValueError: + _raise_invalid_enum_error(state, value) +//% else + obj.{$ attr.py_name $} = _parse__{$ attr.type $}(state, value) +//% endif + +//% endfor +def _node_class_attr_end__{$ type $}(state: _ParseState, obj, /): +//% for b in type.bases if b is has_attributes + _node_class_attr_end__{$ b $}(state, obj) +//% endfor +//% for ref in type|attributes + if not hasattr(obj, "{$ ref.py_name $}"): +//% if ref.optional + obj.{$ ref.py_name $} = None +//% else + _raise_missing_attribute_error(state,"{$ ref.name $}") +//% endif +//% endfor + +//% endif +//% if type is has_children_or_content +_node_class_child__{$ type $} = _cur_list = {} +//% for b in type.bases|select('has_children_or_content') +_node_class_child__{$ type $}.update(_node_class_child__{$ b $}) +//% endfor + +//% for cref in type|children +@_add_to_list("{$ cref.name $}") +def _e__{$ type $}__{$ cref.name $}(state: _ParseState, obj, attr: Iterable[tuple[str, str]], /): +//% if cref.is_list + _node_{$ 'start_' if cref.type is builtin_t else 'class_start__' $}{$ cref.type $}( + state, + obj.{$ cref.py_name $}.append, + attr{$ cref.type.extra_args $}) +//% else + if hasattr(obj, "{$ cref.py_name $}"): + _raise_duplicate_element_error(state, "{$ cref.name $}") + + _node_{$ 'start_' if cref.type is builtin_t else 'class_start__' $}{$ cref.type $}( + state, + functools.partial(setattr, obj, "{$ cref.py_name $}"), + attr{$ cref.type.extra_args $}) +//% endif + +//% endfor +//% for cname,ctype in type|content +//% if type is content_union and ctype is code_point_t +_add_to_list("{$ cname $}")(_union_codepoint_element(chr({$ ctype.char $}))) +//% else +@_add_to_list("{$ cname $}") +def _e__{$ type $}__{$ cname $}(state: _ParseState, obj, attr: Iterable[tuple[str, str]], /): +//% if type is content_tuple + _node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}( + state, + _push_tuple_item( + state, + {$ loop.index0 $}, + _tuple_item_tag_names__{$ type $}, + ListItem_{$ type $}, + obj), + attr{$ ctype.extra_args $}) +//% elif type is content_union +//% if ctype is appends_str + c = _node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}(state, attr) + if obj and type(obj[-1]) is str: + obj[-1] += c + else: + obj.append(c) +//% else + _node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}( + state, + (lambda x: obj.append(TaggedValue("{$ cname $}", x))), + attr{$ ctype.extra_args $}) +//% endif +//% else + _node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ ctype $}( + state, + obj.append, + attr{$ ctype.extra_args $}) +//% endif +//% endif + +//% endfor +//% endif +//% if type is used_directly +//% if type is content_tuple +class ListItem_{$ type $}(NamedTuple): +//% for cname,ctype in type|content + {$ cname $}: {$ ctype.py_name $} +//% endfor + +_tuple_item_tag_names__{$ type $} = ListItem_{$ type $}._fields + +//% endif +class Node_{$ type $}({$ 'List' if type is list_e $}Node): + __slots__ = ( +//% for f in type.all_fields() + "{$ f.py_name $}", +//% 
endfor + ) + + _fields = __slots__ + +//% if type is list_e or type is has_fields + def __init__( + self, +//% if type is list_e + __children, +//% endif +//% for f in type.all_fields() if f is not optional + {$ f.py_name $}: {$ f.py_type(true) $}, +//% endfor +//% for f in type.all_fields() if f is optional + {$ f.py_name $}: {$ f.py_type(true) $} = {$ '()' if f is array_field else 'None' $}, +//% endfor + ): +//% if type is list_e + super().__init__(__children) +//% endif +//% for f in type.all_fields() +//% if f is array_field + self.{$ f.py_name $} = {$ f.py_name $} if _GLOBAL_type({$ f.py_name $}) is _GLOBAL_list else _GLOBAL_list({$ f.py_name $}) +//% else + self.{$ f.py_name $} = {$ f.py_name $} +//% endif +//% endfor +//% endif + + +def _node_class_start__{$ type $}(state: _ParseState, setter: Callable, attr: Iterable[tuple[str, str]], /): + n = Node_{$ type $}.__new__(Node_{$ type $}) + +//% for ref in type|children if ref.is_list + n.{$ ref.py_name $} = [] +//% endfor + +//% if type is has_attributes or type.other_attr == OtherAttrAction.error + for name, value in attr: +//% if type is has_attributes + handler = _node_class_attr__{$ type $}.get(name) + + if handler is not None: + handler(state, n, value) +//% if type.other_attr == OtherAttrAction.error + else: + _warn_unexpected_attribute(state, name) +//% endif +//% else + _warn_unexpected_attribute(state, name) +//% endif + +//% endif +//% if type is has_attributes + _node_class_attr_end__{$ type $}(state, n) +//% endif + + state.parse_callbacks.append(_ParseCallbacks( + n, + setter, +//% if type is has_children_or_content + _node_class_child__{$ type $}, +//% else + None, +//% endif +//% if type is needs_finish_call + _node_class_finish__{$ type $}, +//% else + None, +//% endif +//% if type is allow_text + _node_list_common_text, +//% else + None, +//% endif + )) + + +//% if type is needs_finish_fields_call +def _node_class_finish_fields__{$ type $}(state: _ParseState, obj, /) -> None: +//% for b in type.bases|select('needs_finish_fields_call') + _node_class_finish_fields__{$ b $}(state, obj) +//% endfor +//% for ref in type|children +//% if ref.min_items +//% if ref.is_list + if len(obj.{$ ref.py_name $}) < 1: + _raise_empty_list_element_error(state,"{$ ref.name $}") +//% else + if not hasattr(obj, "{$ ref.py_name $}"): + _raise_missing_element_error(state,"{$ ref.name $}") +//% endif +//% elif not ref.is_list + if not hasattr(obj, "{$ ref.py_name $}"): + obj.{$ ref.py_name $} = None +//% endif +//% endfor + +//% endif +//% if type is needs_finish_call +def _node_class_finish__{$ type $}(state: _ParseState, /): + n = state.parse_callbacks[-1].value +//% if type is needs_finish_fields_call + _node_class_finish_fields__{$ type $}(state, n) +//% endif +//% if type is content_tuple and type.content|length > 1 + _check_complete_tuple(state, _tuple_item_tag_names__{$ type $}, ListItem_{$ type $}, n) +//% endif + +//% endif +//% endif +//% elif type is enumeration_t +class {$ type $}(enum.Enum): +//% for entry in type.children + {$ entry.id $} = "{$ entry.xml $}" +//% endfor + +//% elif type is char_enum_t +{$ type $} = Literal[{% for c in type.values %}{$ "'"~c~"'" $}{$ ',' if not loop.last $}{% endfor %}] + +def _parse__{$ type $}(state: _ParseState, data, /): + data = data.strip() + if len(data) != 1: + state.raise_parse_error("value must be a single character") + + if data not in "{$ type.values $}": + _raise_invalid_char_enum_error(state, data,"{$ type.values $}") + + return data + +//% endif +//% endfor + +_top_level_handlers 
= _cur_list = {} + +//% for name,type in root_elements +@_add_to_list("{$ name $}") +def _(state: _ParseState, obj, attr: Iterable[tuple[str, str]], /): + cb = state.parse_callbacks[-1] + if obj is not None: + state.raise_parse_error("cannot have more than one root element") + + def setter(x): + cb.value = TaggedValue("{$ name $}", x) + + return _node_{$ 'start_' if ctype is builtin_t else 'class_start__' $}{$ type $}(state, setter, attr) +//% endfor + + +def _parse(obj, meth, /): + p = expat.ParserCreate() + state = _ParseState(p) + + state.parse_callbacks.append(_ParseCallbacks( + None, + None, + _top_level_handlers)) + + p.StartElementHandler = state.start_element + p.EndElementHandler = state.end_element + p.CharacterDataHandler = state.character_data + + try: + meth(p, obj) + except expat.ExpatError as e: + raise ParseError(expat.errors.messages[e.code], e.lineno) + finally: + # break reference cycle for faster garbage collection + p.StartElementHandler = None + p.EndElementHandler = None + p.CharacterDataHandler = None + + value = state.parse_callbacks[0].value + if value is None: + raise ParseError("document without a recognized root element", None) + + return value + +def parse_str(data: str, /): + return _parse(data, expat.XMLParserType.Parse) + +def parse_file(file, /): + return _parse(file, expat.XMLParserType.ParseFile) diff --git a/xml_parser_generator/setuptools_builder.py b/xml_parser_generator/setuptools_builder.py index 7b7674bb..927028cf 100644 --- a/xml_parser_generator/setuptools_builder.py +++ b/xml_parser_generator/setuptools_builder.py @@ -45,16 +45,19 @@ def finalize_options(self): class CustomBuildExt(build_ext): - """Extend build_ext to automatically generate _parser.c""" + """Extend build_ext to automatically generate the parser module""" user_options = build_ext.user_options + extra_user_options SCHEMA_FILE = os.path.join("xml_parser_generator", "schema.json") MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.c.in") + PY_MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.py.in") STUBS_TEMPLATE = os.path.join("xml_parser_generator", "stubs_template.pyi.in") MAKER_SOURCE = os.path.join("xml_parser_generator", "make_parser.py") - DEPENDENCIES = [SCHEMA_FILE, MODULE_TEMPLATE, STUBS_TEMPLATE, MAKER_SOURCE] + M_DEPENDENCIES = [SCHEMA_FILE, MODULE_TEMPLATE, MAKER_SOURCE] + PY_M_DEPENDENCIES = [SCHEMA_FILE, PY_MODULE_TEMPLATE, MAKER_SOURCE] + S_DEPENDENCIES = [SCHEMA_FILE, STUBS_TEMPLATE, MAKER_SOURCE] def initialize_options(self): super().initialize_options() @@ -86,26 +89,31 @@ def build_extensions(self): source = os.path.join(self.build_temp, self.extensions[0].name + ".c") - # put the stub file in the same place that the extension module will be + # put the stub and Python file in the same place that the extension + # module will be ext_dest = self.get_ext_fullpath(self.extensions[0].name) libdir = os.path.dirname(ext_dest) stub = os.path.join(libdir, self.extensions[0].name + ".pyi") + py_source = os.path.join(libdir, self.extensions[0].name + "_py.py") mkpath(self.build_temp, dry_run=self.dry_run) mkpath(libdir, dry_run=self.dry_run) - if ( - self.force - or newer_group(self.DEPENDENCIES, source) - or newer_group(self.DEPENDENCIES, stub) + regen = [] + for dep, out, tmpl in ( + (self.M_DEPENDENCIES, source, self.MODULE_TEMPLATE), + (self.PY_M_DEPENDENCIES, py_source, self.PY_MODULE_TEMPLATE), + (self.S_DEPENDENCIES, stub, self.STUBS_TEMPLATE), ): - log.info(f'generating "{source}" and "{stub}" from templates') + if 
self.force or newer_group(dep, out): + regen.append((tmpl, out)) + + if regen: + log.info("generating module source from templates") if not self.dry_run: - make_parser.generate_from_json( - self.SCHEMA_FILE, self.MODULE_TEMPLATE, self.STUBS_TEMPLATE, source, stub - ) + make_parser.generate_from_json(self.SCHEMA_FILE, regen) else: - log.debug(f'"{source}" and "{stub}" are up-to-date') + log.debug(f'"{source}", "{py_source}" and "{stub}" are up-to-date') self.extensions[0].sources.append(source) From d479834fe41d089ebe2423972db5e8934f6d9915 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sun, 14 Jan 2024 15:27:33 -0500 Subject: [PATCH 57/65] Removed compiled version of parser --- MANIFEST.in | 2 +- breathe/parser.py | 50 +-- pyproject.toml | 6 +- requirements/development.txt | 1 - scripts/doxygen_cache.py | 7 +- setup.cfg | 1 - setup.py | 21 +- tests/data/examples/test_xrefsect/compare.xml | 40 ++- tests/data/examples/test_xrefsect/input.rst | 6 + xml_parser_generator/make_parser.py | 36 +-- xml_parser_generator/module_template.py.in | 285 ++++++++++++------ xml_parser_generator/setuptools_builder.py | 217 +++++++------ 12 files changed, 386 insertions(+), 286 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index db7a32bc..04d203b9 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,5 @@ recursive-include xml_parser_generator *.py *.in *.json include requirements/*.txt tests/*.py graft tests/data -exclude breathe/_parser.pyi +exclude breathe/_parser.py global-exclude *.py[cod] \ No newline at end of file diff --git a/breathe/parser.py b/breathe/parser.py index 143a48d4..6381db6b 100644 --- a/breathe/parser.py +++ b/breathe/parser.py @@ -6,7 +6,7 @@ import collections from breathe import file_state_cache, path_handler from breathe.project import ProjectInfo - +from breathe._parser import * from sphinx.application import Sphinx @@ -18,41 +18,19 @@ T_inv = TypeVar("T_inv") -try: - from breathe._parser import * -except ImportError: - from breathe._parser_py import * -else: - @reprlib.recursive_repr() - def node_repr(self: Node) -> str: # pragma: no cover - cls = type(self) - fields = [] - if isinstance(self, FrozenList): - pos = ", ".join(map(repr, self)) - fields.append(f"[{pos}]") - fields.extend(f"{field}={getattr(self,field)!r}" for field in cls._fields) - inner = ", ".join(fields) - return f"{cls.__name__}({inner})" - - - Node.__repr__ = node_repr # type: ignore - - - @reprlib.recursive_repr() - def taggedvalue_repr(self: TaggedValue) -> str: # pragma: no cover - return f"{self.__class__.__name__}({self.name!r}, {self.value!r})" - - - TaggedValue.__repr__ = taggedvalue_repr # type: ignore - - - @reprlib.recursive_repr() - def frozenlist_repr(self: FrozenList) -> str: # pragma: no cover - inner = ", ".join(map(repr, self)) - return f"{self.__class__.__name__}([{inner}])" +@reprlib.recursive_repr() +def node_repr(self: Node) -> str: # pragma: no cover + cls = type(self) + fields = [] + if isinstance(self, list): + pos = ", ".join(map(repr, self)) + fields.append(f"[{pos}]") + fields.extend(f"{field}={getattr(self,field)!r}" for field in cls._fields) + inner = ", ".join(fields) + return f"{cls.__name__}({inner})" - FrozenList.__repr__ = frozenlist_repr # type: ignore +Node.__repr__ = node_repr # type: ignore def description_has_content(node: Node_descriptionType | None) -> bool: @@ -188,7 +166,7 @@ def parse_compound(self, refid: str, project_info: ProjectInfo) -> DoxygenCompou @overload -def tag_name_value(x: TaggedValue[T, U]) -> tuple[T, U]: +def tag_name_value(x: 
TaggedValue[T_covar, U_covar]) -> tuple[T_covar, U_covar]: ... @@ -198,7 +176,7 @@ def tag_name_value(x: str) -> tuple[None, str]: @overload -def tag_name_value(x: TaggedValue[T, U] | str) -> tuple[T | None, U | str]: +def tag_name_value(x: TaggedValue[T_covar, U_covar] | str) -> tuple[T_covar | None, U_covar | str]: ... diff --git a/pyproject.toml b/pyproject.toml index 42f05e49..02d0a054 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools", "jinja2", "perfect-hash"] +requires = ["setuptools", "jinja2"] build-backend = "setuptools.build_meta" [project] @@ -43,7 +43,3 @@ line-length = 100 extend-exclude = ''' ^/examples/.* | ^/tests/data/.* ''' - -[tool.cibuildwheel] -test-requires = "pytest" -test-command = "BREATHE_DOXYGEN_TEST_CACHE={project}/tests/data/examples/_cache pytest {project}/tests" diff --git a/requirements/development.txt b/requirements/development.txt index f2fd457a..731920b8 100644 --- a/requirements/development.txt +++ b/requirements/development.txt @@ -13,4 +13,3 @@ black==22.3.0 sphinx-copybutton furo -perfect-hash diff --git a/scripts/doxygen_cache.py b/scripts/doxygen_cache.py index 96b27407..453ceb67 100644 --- a/scripts/doxygen_cache.py +++ b/scripts/doxygen_cache.py @@ -1,9 +1,4 @@ -"""Run Doxygen on all test samples and save the results. - -This allows running the tests in multiple Docker containers with different -architectures, without requiring each of them to download and build a specific -version of Doxygen. -""" +"""Run Doxygen on all test samples and save the results.""" import os import pathlib diff --git a/setup.cfg b/setup.cfg index a7013d04..e4c67369 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,7 +5,6 @@ per-file-ignores = breathe/parser/index.py:E305 [bdist_wheel] -py-limited-api = cp38 universal = 0 [options] diff --git a/setup.py b/setup.py index 97d41cab..a382f349 100644 --- a/setup.py +++ b/setup.py @@ -1,14 +1,14 @@ # -*- coding: utf-8 -*- import sys import os.path -from setuptools import setup, Extension +from setuptools import setup # add xml_parser_generator to the import path list base_dir = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(base_dir, "xml_parser_generator")) -from setuptools_builder import CustomBuild, CustomBuildExt +from setuptools_builder import CustomBuildPy long_desc = """ @@ -18,20 +18,5 @@ setup( long_description=long_desc, - ext_package="breathe", - ext_modules=[ - Extension( - "_parser", - [], # source is generated by CustomBuildExt - depends=CustomBuildExt.M_DEPENDENCIES, - libraries=["expat"], - define_macros=[ - ("PARSER_PY_LIMITED_API", "0x03080000"), # set Stable ABI version to 3.8 - ("MODULE_NAME", "_parser"), - ("FULL_MODULE_STR", '"breathe._parser"'), - ], - py_limited_api=True, - ) - ], - cmdclass={"build": CustomBuild, "build_ext": CustomBuildExt}, + cmdclass={"build_py": CustomBuildPy}, ) diff --git a/tests/data/examples/test_xrefsect/compare.xml b/tests/data/examples/test_xrefsect/compare.xml index af7b2b2f..f460b953 100644 --- a/tests/data/examples/test_xrefsect/compare.xml +++ b/tests/data/examples/test_xrefsect/compare.xml @@ -9,7 +9,7 @@ int unimplementedvoid An example of using Doxygen’s todo command. - Todo:Implement this function. + Todo:Implement this function. @@ -17,8 +17,8 @@ void buggy_functionint param An example of using Doxygen’s bug and test commands. - Bug:Does not work yet. - Test:Add proper unit testing first. + Bug:Does not work yet. + Test:Add proper unit testing first. 
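Besides the wheel configuration, the removed cibuildwheel test command above also documents how the Doxygen cache is meant to be wired up locally. A hedged sketch in Python, assuming scripts/doxygen_cache.py populates tests/data/examples/_cache with a version.txt file and one xml directory per example, which is the layout the test fixtures read:

    import os
    import pathlib
    import subprocess

    cache = pathlib.Path("tests/data/examples/_cache").resolve()

    # assumed: this script fills the cache directory consumed by the tests
    subprocess.run(["python", "scripts/doxygen_cache.py"], check=True)

    env = dict(os.environ, BREATHE_DOXYGEN_TEST_CACHE=str(cache))
    subprocess.run(["pytest", "tests"], check=True, env=env)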
@@ -26,7 +26,7 @@ void old_functionvoid An example of using Doxygen’s deprecated command. - Deprecated:Should not be used on new code. + Deprecated:Should not be used on new code. @@ -34,8 +34,38 @@ void sample_xrefitem_functionvoid An example of a custom Doxygen xrefitem declared as an ALIAS. - xref Sample:This text shows up in the xref output. + xref Sample:This text shows up in the xref output. + + page todo + + Member unimplemented (void)Implement this function. + + + + page bug + + Member buggy_function (int param)Does not work yet. + + + + page test + + Member buggy_function (int param)Add proper unit testing first. + + + + page deprecated + + Member old_function (void)Should not be used on new code. + + + + page xrefsample + + Member sample_xrefitem_function (void) This text shows up in the xref output. + + diff --git a/tests/data/examples/test_xrefsect/input.rst b/tests/data/examples/test_xrefsect/input.rst index 35e0d8e0..135370bd 100644 --- a/tests/data/examples/test_xrefsect/input.rst +++ b/tests/data/examples/test_xrefsect/input.rst @@ -1 +1,7 @@ .. doxygenfile:: xrefsect.h + +.. doxygenpage:: todo +.. doxygenpage:: bug +.. doxygenpage:: test +.. doxygenpage:: deprecated +.. doxygenpage:: xrefsample diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index 64321015..a9907e0b 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -15,7 +15,7 @@ from typing import Any, Callable, cast, Literal, NamedTuple, NoReturn, TYPE_CHECKING, TypeVar import jinja2 -import perfect_hash +#import perfect_hash if TYPE_CHECKING: from collections.abc import Iterable, Sequence @@ -353,7 +353,7 @@ def py_union_ref(self) -> list[str]: return types return ["ListItem_" + self.name] - def py_union_list(self) -> list[str]: + def py_union_list(self, quote=False) -> list[str]: """Return a list of type annotations, the union of which, represent every possible value of this array's elements. 
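The new quote parameter only changes how the annotation strings are rendered; a hedged illustration with a made-up class name:

    # quote=False renders the member type bare:
    #     TaggedValue[Literal['a', 'b'], Node_example]
    # quote=True wraps it in quotes so it acts as a string forward reference:
    #     TaggedValue[Literal['a', 'b'], "Node_example"]
    # The pure-Python template calls py_union_list(true) because it emits each
    # ListItem_X TypeAlias near the top of the per-type loop, where the
    # referenced Node_ classes may not have been defined yet.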
@@ -370,7 +370,7 @@ def py_union_list(self) -> list[str]: needs_str = True types = [ "TaggedValue[Literal[{}], {}]".format( - comma_join(sorted(f"'{n}'" for n in names), 26), t + comma_join(sorted(f"'{n}'" for n in names), 26), f'"{t}"' if quote else t ) for t, names in by_type.items() ] @@ -542,13 +542,13 @@ class HashData(NamedTuple): g: list[int] -def generate_hash(items: list[str]) -> HashData: - try: - f1, f2, g = perfect_hash.generate_hash(items) - return HashData(f1.salt, f2.salt, g) - except ValueError: - print(items, file=sys.stderr) - raise +#def generate_hash(items: list[str]) -> HashData: +# try: +# f1, f2, g = perfect_hash.generate_hash(items) +# return HashData(f1.salt, f2.salt, g) +# except ValueError: +# print(items, file=sys.stderr) +# raise def collect_field_names( @@ -579,10 +579,10 @@ def field_count(t) -> int: return len(t.attributes) + len(t.children) + sum(cast(int, field_count(b)) for b in t.bases) for t in schema.types.values(): - if isinstance(t, SchemaEnum): - if len(t.children) >= HASH_LOOKUP_THRESHOLD: - t.hash = generate_hash([item.xml for item in t.children]) - elif isinstance(t, SchemaCharEnum): + #if isinstance(t, SchemaEnum): + # if len(t.children) >= HASH_LOOKUP_THRESHOLD: + # t.hash = generate_hash([item.xml for item in t.children]) + if isinstance(t, SchemaCharEnum): char_enum_chars.update(t.values) elif isinstance(t, ElementType): fields: set[str] = set() @@ -735,7 +735,7 @@ def __call__(self): # types sorted topologically with regard to base elements sorted_types: list[SchemaType] = [] - visited_types: set[SchemaType] = set() + visited_types: set[int] = set() for t in schema.types.values(): t.add_sorted(sorted_types, visited_types) @@ -790,9 +790,9 @@ def __call__(self): "element_names": elements, "attribute_names": attributes, "py_field_names": py_field_names, - "e_hash": generate_hash(elements), - "a_hash": generate_hash(attributes), - "py_f_hash": generate_hash(py_field_names), + #"e_hash": generate_hash(elements), + #"a_hash": generate_hash(attributes), + #"py_f_hash": generate_hash(py_field_names), "union_tag_names": sorted(tag_names), "char_enum_chars": {c: i for i, c in enumerate(sorted(char_enum_chars))}, "list_element_field_counts": list(list_element_field_counts), diff --git a/xml_parser_generator/module_template.py.in b/xml_parser_generator/module_template.py.in index 008aec3a..06acf4b4 100644 --- a/xml_parser_generator/module_template.py.in +++ b/xml_parser_generator/module_template.py.in @@ -85,50 +85,215 @@ import warnings import functools from collections.abc import Iterable, Sequence from xml.parsers import expat -from typing import Any, Callable, Literal, NamedTuple, NoReturn, TYPE_CHECKING +from typing import ( + Any, Callable, ClassVar, Generic, Literal, NamedTuple, NoReturn, TYPE_CHECKING, TypeVar +) + +if TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 11): + from typing import TypeAlias + else: + from typing_extensions import TypeAlias + + T = TypeVar("T") + T_covar = TypeVar("T_covar", covariant=True) + U_covar = TypeVar("U_covar", covariant=True) + + +class ParseError(RuntimeError): + @property + def message(self, /) -> str: + return self.args[0] + + @property + def lineno(self, /) -> int: + return self.args[1] + + def __str__(self, /) -> str: + if self.lineno is None: + return "Error: " + self.message + return f"Error on line {self.lineno}: {self.message}" + +class ParseWarning(UserWarning): + pass -try: - from types import GenericAlias -except ImportError: - GenericAlias = lambda cls: cls +class Node: + __slots__ = 
() + + _fields: ClassVar[tuple[str, ...]] +# This needs to run on Python 3.8, where built-in types don't implement +# __class_getitem__, and Python 3.9 and 3.10, which don't allow +# multiple-inheritance with NamedTuple. if TYPE_CHECKING: - ChildStartCallback = Callable[["_ParseState", Any, Iterable[tuple[str, str]]], None] - FinishCallback = Callable[["_ParseState"], None] - TextCallback = Callable[["_ParseState", str], None] - Setter = Callable[[Any], None] + class ListNode(list[T], Node, Generic[T]): + ... + + class TaggedValue(NamedTuple, Generic[T_covar, U_covar]): + name: T_covar + value: U_covar +else: + class TaggedValue(NamedTuple): + name: str + value: Any + + __class_getitem__ = classmethod(lambda cls, x: cls) + + class ListNode(list, Node): + __slots__ = () + + __class_getitem__ = classmethod(lambda cls, x: cls) + + +//% macro emit_fields(type) +{%- for b in type.bases %}{$ emit_fields(b) $}{% endfor -%} +//% for ref in type|attributes + {$ ref.py_name $}: {$ ref.py_type() $} +//% endfor +//% for ref in type|children + {$ ref.py_name $}: {$ ref.py_type() $} +//% endfor +//% endmacro + +//% macro emit_content_fields(type) +{%- for b in type.bases %}{$ emit_content_fields(b) $}{% endfor -%} +//% for cname,ctype in type|content + {$ cname $}: {$ ctype.py_name $} +//% endfor +//% endmacro + + +//% for type in types +//% if type is element +//% if type is content_union +//% set members = type.py_union_list(true)|sort +//% if members|length > 1 +if TYPE_CHECKING: + ListItem_{$ type $}: TypeAlias = ( +//% for m in members + {$ '| ' if not loop.first $}{$ m $} +//% endfor +) + +//% endif +//% endif +//% if type is used_directly +//% if type is content_tuple +//% set list_item_type = 'ListItem_'~type +class ListItem_{$ type $}(NamedTuple): +//% for cname,ctype in type|content + {$ cname $}: {$ ctype.py_name $} +//% endfor + +//% elif type is content_union +//% if members|length > 1 +//% set list_item_type = 'ListItem_'~type +//% else +//% set list_item_type = members|first +//% endif +//% elif type is content_bare +//% set list_item_type = (type|content|first)[1].py_name +//% elif type is list_e +{$ "invalid content type"|error $} +//% endif +class Node_{$ type $}({$ 'ListNode["'~list_item_type~'"]' if type is list_e else 'Node' $}): + __slots__ = ( +//% for f in type.all_fields() + "{$ f.py_name $}", +//% endfor + ) + + _fields = __slots__ + +//% if type is list_e or type is has_fields + def __init__( + self, +//% if type is list_e + __children, +//% endif +//% for f in type.all_fields() if f is not optional + {$ f.py_name $}: {$ f.py_type(true) $}, +//% endfor +//% for f in type.all_fields() if f is optional + {$ f.py_name $}: {$ f.py_type(true) $} = {$ '()' if f is array_field else 'None' $}, +//% endfor + ): # pragma: no cover +//% if type is list_e + super().__init__(__children) +//% endif +//% for f in type.all_fields() +//% if f is array_field + self.{$ f.py_name $} = {$ f.py_name $} if _GLOBAL_type({$ f.py_name $}) is _GLOBAL_list else _GLOBAL_list({$ f.py_name $}) +//% else + self.{$ f.py_name $} = {$ f.py_name $} +//% endif +//% endfor + +//% endif +//% endif +//% elif type is enumeration_t +class {$ type $}(enum.Enum): +//% for entry in type.children + {$ entry.id $} = "{$ entry.xml $}" +//% endfor + +//% elif type is char_enum_t +{$ type $} = Literal[{% for c in type.values %}{$ "'"~c~"'" $}{$ ',' if not loop.last $}{% endfor %}] +//% endif +//% endfor + + +def parse_str(data: str, /): + return _parse(data, expat.XMLParserType.Parse) + +def parse_file(file, /): + return 
_parse(file, expat.XMLParserType.ParseFile) + + + +if TYPE_CHECKING: + _ChildStartCallback = Callable[["_ParseState", Any, Iterable[tuple[str, str]]], None] + _FinishCallback = Callable[["_ParseState"], None] + _TextCallback = Callable[["_ParseState", str], None] + _Setter = Callable[[Any], None] + + _T_covar = TypeVar("_T_covar", covariant=True) + _U_covar = TypeVar("_U_covar", covariant=True) _GLOBAL_type = type _GLOBAL_list = list + class _ParseCallbacks: __slots__ = "value", "setter", "cs_call", "f_call", "t_call" value: Any """The value corresponding the currently visited XML element.""" - setter: Setter | None + setter: _Setter | None """A callback given by the parent element to consume the value. This may be None if no action is needed. """ - cs_call: dict[str, ChildStartCallback] | None + cs_call: dict[str, _ChildStartCallback] | None """A mapping of element names to callbacks for a children of the current element. This may be None if no child elements are allowed. """ - f_call: FinishCallback | None + f_call: _FinishCallback | None """A callback for when the current element is closed. This may be None if no action is needed. """ - t_call: TextCallback | None + t_call: _TextCallback | None """A callback for text contained directly inside the current element. This may be None if text is not allowed. If None, whitespace is ignored. @@ -141,23 +306,6 @@ class _ParseCallbacks: self.f_call = f_call self.t_call = t_call -class ParseError(RuntimeError): - @property - def message(self, /) -> str: - return self.args[0] - - @property - def lineno(self, /) -> int: - return self.args[1] - - def __str__(self, /) -> str: - if self.lineno is None: - return "Error: " + self.message - return f"Error on line {self.lineno}: {self.message}" - -class ParseWarning(UserWarning): - pass - class _ParseState: def __init__(self, parser, /): @@ -221,20 +369,6 @@ class _ParseState: warnings.warn(ParseWarning(f'Warning on line {self.parser.CurrentLineNumber}: {msg}')) -class TaggedValue(NamedTuple): - name: str - value: Any - - __class_getitem__ = classmethod(GenericAlias) - - -class Node: - __slots__ = () - -class ListNode(list, Node): - __slots__ = () - - def _node_list_common_text(state: _ParseState, data: str, /): value = state.parse_callbacks[-1].value @@ -244,7 +378,6 @@ def _node_list_common_text(state: _ParseState, data: str, /): value.append(data) - def _push_tuple_item( state: _ParseState, tuple_i: int, @@ -326,14 +459,14 @@ def _parse_DoxBool_attribute(state: _ParseState, name: str, value: str, /) -> bo def _node_string_text(state: _ParseState, data: str) -> None: state.parse_callbacks[-1].value += data -def _node_start_string(state: _ParseState, setter: Setter, attr: Iterable[tuple[str, str]], /): +def _node_start_string(state: _ParseState, setter: _Setter, attr: Iterable[tuple[str, str]], /): for name, _ in attr: _warn_unexpected_attribute(state, name) state.parse_callbacks.append(_ParseCallbacks('', setter, None, None, _node_string_text)) -def _node_start_empty(state: _ParseState, setter: Setter, attr: Iterable[tuple[str, str]], /): +def _node_start_empty(state: _ParseState, setter: _Setter, attr: Iterable[tuple[str, str]], /): for name, _ in attr: _warn_unexpected_attribute(state, name) @@ -494,48 +627,14 @@ def _e__{$ type $}__{$ cname $}(state: _ParseState, obj, attr: Iterable[tuple[st //% endif //% if type is used_directly //% if type is content_tuple -class ListItem_{$ type $}(NamedTuple): -//% for cname,ctype in type|content - {$ cname $}: {$ ctype.py_name $} -//% endfor - 
_tuple_item_tag_names__{$ type $} = ListItem_{$ type $}._fields +//% elif type is content_union +//% elif type is content_bare +//% set list_item_type = (type|content|first)[1].py_name +//% elif type is list_e +{$ "invalid content type"|error $} //% endif -class Node_{$ type $}({$ 'List' if type is list_e $}Node): - __slots__ = ( -//% for f in type.all_fields() - "{$ f.py_name $}", -//% endfor - ) - - _fields = __slots__ - -//% if type is list_e or type is has_fields - def __init__( - self, -//% if type is list_e - __children, -//% endif -//% for f in type.all_fields() if f is not optional - {$ f.py_name $}: {$ f.py_type(true) $}, -//% endfor -//% for f in type.all_fields() if f is optional - {$ f.py_name $}: {$ f.py_type(true) $} = {$ '()' if f is array_field else 'None' $}, -//% endfor - ): -//% if type is list_e - super().__init__(__children) -//% endif -//% for f in type.all_fields() -//% if f is array_field - self.{$ f.py_name $} = {$ f.py_name $} if _GLOBAL_type({$ f.py_name $}) is _GLOBAL_list else _GLOBAL_list({$ f.py_name $}) -//% else - self.{$ f.py_name $} = {$ f.py_name $} -//% endif -//% endfor -//% endif - def _node_class_start__{$ type $}(state: _ParseState, setter: Callable, attr: Iterable[tuple[str, str]], /): n = Node_{$ type $}.__new__(Node_{$ type $}) @@ -618,15 +717,7 @@ def _node_class_finish__{$ type $}(state: _ParseState, /): //% endif //% endif -//% elif type is enumeration_t -class {$ type $}(enum.Enum): -//% for entry in type.children - {$ entry.id $} = "{$ entry.xml $}" -//% endfor - //% elif type is char_enum_t -{$ type $} = Literal[{% for c in type.values %}{$ "'"~c~"'" $}{$ ',' if not loop.last $}{% endfor %}] - def _parse__{$ type $}(state: _ParseState, data, /): data = data.strip() if len(data) != 1: @@ -684,9 +775,3 @@ def _parse(obj, meth, /): raise ParseError("document without a recognized root element", None) return value - -def parse_str(data: str, /): - return _parse(data, expat.XMLParserType.Parse) - -def parse_file(file, /): - return _parse(file, expat.XMLParserType.ParseFile) diff --git a/xml_parser_generator/setuptools_builder.py b/xml_parser_generator/setuptools_builder.py index 927028cf..7bbf611f 100644 --- a/xml_parser_generator/setuptools_builder.py +++ b/xml_parser_generator/setuptools_builder.py @@ -2,12 +2,13 @@ import os.path -try: - from setuptools.command.build import build -except ImportError: - from distutils.command.build import build +#try: +# from setuptools.command.build import build +#except ImportError: +# from distutils.command.build import build -from setuptools.command.build_ext import build_ext +#from setuptools.command.build_ext import build_ext +from setuptools.command.build_py import build_py try: from setuptools.modified import newer_group @@ -16,105 +17,131 @@ from distutils import log from distutils.dir_util import mkpath -from distutils.util import split_quoted +#from distutils.util import split_quoted import make_parser -extra_user_options = [ - ("cpp-opts=", None, "extra command line arguments for the compiler"), - ("ld-opts=", None, "extra command line arguments for the linker"), -] - - -class CustomBuild(build): - """Add extra parameters for 'build' to pass to 'build_ext'""" - - user_options = build.user_options + extra_user_options - - def initialize_options(self): - super().initialize_options() - self.cpp_opts = "" - self.ld_opts = "" - - def finalize_options(self): - super().finalize_options() - self.cpp_opts = split_quoted(self.cpp_opts) - self.ld_opts = split_quoted(self.ld_opts) - - -class 
CustomBuildExt(build_ext): - """Extend build_ext to automatically generate the parser module""" - - user_options = build_ext.user_options + extra_user_options - +#extra_user_options = [ +# ("cpp-opts=", None, "extra command line arguments for the compiler"), +# ("ld-opts=", None, "extra command line arguments for the linker"), +#] + + +#class CustomBuild(build): +# """Add extra parameters for 'build' to pass to 'build_ext'""" +# +# user_options = build.user_options + extra_user_options +# +# def initialize_options(self): +# super().initialize_options() +# self.cpp_opts = "" +# self.ld_opts = "" +# +# def finalize_options(self): +# super().finalize_options() +# self.cpp_opts = split_quoted(self.cpp_opts) +# self.ld_opts = split_quoted(self.ld_opts) + + +#class CustomBuildExt(build_ext): +# """Extend build_ext to automatically generate the parser module""" +# +# user_options = build_ext.user_options + extra_user_options +# +# SCHEMA_FILE = os.path.join("xml_parser_generator", "schema.json") +# MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.c.in") +# PY_MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.py.in") +# STUBS_TEMPLATE = os.path.join("xml_parser_generator", "stubs_template.pyi.in") +# MAKER_SOURCE = os.path.join("xml_parser_generator", "make_parser.py") +# +# M_DEPENDENCIES = [SCHEMA_FILE, MODULE_TEMPLATE, MAKER_SOURCE] +# PY_M_DEPENDENCIES = [SCHEMA_FILE, PY_MODULE_TEMPLATE, MAKER_SOURCE] +# S_DEPENDENCIES = [SCHEMA_FILE, STUBS_TEMPLATE, MAKER_SOURCE] +# +# def initialize_options(self): +# super().initialize_options() +# self.cpp_opts = None +# self.ld_opts = None +# +# def finalize_options(self): +# if self.cpp_opts is not None: +# self.cpp_opts = split_quoted(self.cpp_opts) +# if self.ld_opts is not None: +# self.ld_opts = split_quoted(self.ld_opts) +# +# self.set_undefined_options("build", ("cpp_opts", "cpp_opts"), ("ld_opts", "ld_opts")) +# super().finalize_options() +# +# def build_extensions(self): +# assert len(self.extensions) == 1 +# +# if not self.debug: +# # The parser doesn't do any complicated calculation; its speed will +# # mostly depend on file read and memory allocation speed. Thus it's +# # better to optimize for size. 
+# c = self.compiler.compiler_type +# if c == "msvc": +# self.extensions[0].extra_compile_args = ["/O1"] +# elif c in {"unix", "cygwin", "mingw32"}: +# self.extensions[0].extra_compile_args = ["-Os"] +# self.extensions[0].extra_link_args = ["-s"] +# +# source = os.path.join(self.build_temp, self.extensions[0].name + ".c") +# +# # put the stub and Python file in the same place that the extension +# # module will be +# ext_dest = self.get_ext_fullpath(self.extensions[0].name) +# libdir = os.path.dirname(ext_dest) +# stub = os.path.join(libdir, self.extensions[0].name + ".pyi") +# py_source = os.path.join(libdir, self.extensions[0].name + "_py.py") +# +# mkpath(self.build_temp, dry_run=self.dry_run) +# mkpath(libdir, dry_run=self.dry_run) +# +# regen = [] +# for dep, out, tmpl in ( +# (self.M_DEPENDENCIES, source, self.MODULE_TEMPLATE), +# (self.PY_M_DEPENDENCIES, py_source, self.PY_MODULE_TEMPLATE), +# (self.S_DEPENDENCIES, stub, self.STUBS_TEMPLATE), +# ): +# if self.force or newer_group(dep, out): +# regen.append((tmpl, out)) +# +# if regen: +# log.info("generating module source from templates") +# if not self.dry_run: +# make_parser.generate_from_json(self.SCHEMA_FILE, regen) +# else: +# log.debug(f'"{source}", "{py_source}" and "{stub}" are up-to-date') +# +# self.extensions[0].sources.append(source) +# +# super().build_extensions() + + +class CustomBuildPy(build_py): SCHEMA_FILE = os.path.join("xml_parser_generator", "schema.json") - MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.c.in") PY_MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.py.in") - STUBS_TEMPLATE = os.path.join("xml_parser_generator", "stubs_template.pyi.in") MAKER_SOURCE = os.path.join("xml_parser_generator", "make_parser.py") + PARSER_DEST = os.path.join("breathe", "_parser.py") - M_DEPENDENCIES = [SCHEMA_FILE, MODULE_TEMPLATE, MAKER_SOURCE] PY_M_DEPENDENCIES = [SCHEMA_FILE, PY_MODULE_TEMPLATE, MAKER_SOURCE] - S_DEPENDENCIES = [SCHEMA_FILE, STUBS_TEMPLATE, MAKER_SOURCE] - - def initialize_options(self): - super().initialize_options() - self.cpp_opts = None - self.ld_opts = None - - def finalize_options(self): - if self.cpp_opts is not None: - self.cpp_opts = split_quoted(self.cpp_opts) - if self.ld_opts is not None: - self.ld_opts = split_quoted(self.ld_opts) - - self.set_undefined_options("build", ("cpp_opts", "cpp_opts"), ("ld_opts", "ld_opts")) - super().finalize_options() - - def build_extensions(self): - assert len(self.extensions) == 1 - - if not self.debug: - # The parser doesn't do any complicated calculation; its speed will - # mostly depend on file read and memory allocation speed. Thus it's - # better to optimize for size. 
- c = self.compiler.compiler_type - if c == "msvc": - self.extensions[0].extra_compile_args = ["/O1"] - elif c in {"unix", "cygwin", "mingw32"}: - self.extensions[0].extra_compile_args = ["-Os"] - self.extensions[0].extra_link_args = ["-s"] - - source = os.path.join(self.build_temp, self.extensions[0].name + ".c") - - # put the stub and Python file in the same place that the extension - # module will be - ext_dest = self.get_ext_fullpath(self.extensions[0].name) - libdir = os.path.dirname(ext_dest) - stub = os.path.join(libdir, self.extensions[0].name + ".pyi") - py_source = os.path.join(libdir, self.extensions[0].name + "_py.py") - - mkpath(self.build_temp, dry_run=self.dry_run) - mkpath(libdir, dry_run=self.dry_run) - - regen = [] - for dep, out, tmpl in ( - (self.M_DEPENDENCIES, source, self.MODULE_TEMPLATE), - (self.PY_M_DEPENDENCIES, py_source, self.PY_MODULE_TEMPLATE), - (self.S_DEPENDENCIES, stub, self.STUBS_TEMPLATE), - ): - if self.force or newer_group(dep, out): - regen.append((tmpl, out)) - - if regen: - log.info("generating module source from templates") - if not self.dry_run: - make_parser.generate_from_json(self.SCHEMA_FILE, regen) - else: - log.debug(f'"{source}", "{py_source}" and "{stub}" are up-to-date') - self.extensions[0].sources.append(source) + def make_parser(self): + dest = self.PARSER_DEST + if not self.editable_mode: + dest = os.path.join(self.build_lib, dest) + mkpath(os.path.dirname(dest), dry_run=self.dry_run) - super().build_extensions() + if self.force or newer_group(self.PY_M_DEPENDENCIES, dest): + log.info(f'generating "{dest}" source from template') + if not self.dry_run: + make_parser.generate_from_json(self.SCHEMA_FILE, [(self.PY_MODULE_TEMPLATE, dest)]) + else: + log.debug(f'"{dest}" is up-to-date') + + def run(self): + super().run() + self.make_parser() From 165937388c447a332b7c7fa8e02d924e3cd60cf0 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sun, 21 Jan 2024 19:59:10 -0500 Subject: [PATCH 58/65] Fixed working with multiple "projects" --- .gitignore | 3 +- breathe/README.rst | 6 +- breathe/directives/function.py | 29 ++-- breathe/parser.py | 35 ++-- breathe/process.py | 2 +- breathe/renderer/sphinxrenderer.py | 14 +- scripts/doxygen_cache.py | 62 ++++--- .../examples/test_group/compare-1.10.0.xml | 73 ++++++++ tests/data/multi_project/A/stuff.h | 16 ++ tests/data/multi_project/B/stuff.h | 16 ++ tests/data/multi_project/C/stuff.h | 16 ++ tests/data/multi_project/compare.xml | 67 +++++++ tests/data/multi_project/input.rst | 27 +++ tests/test_examples.py | 79 ++++++--- tests/test_parser.py | 1 + xml_parser_generator/make_parser.py | 12 +- xml_parser_generator/module_template.py.in | 9 +- xml_parser_generator/schema.json | 3 +- xml_parser_generator/setuptools_builder.py | 163 +++++++++--------- 19 files changed, 441 insertions(+), 192 deletions(-) create mode 100644 tests/data/examples/test_group/compare-1.10.0.xml create mode 100644 tests/data/multi_project/A/stuff.h create mode 100644 tests/data/multi_project/B/stuff.h create mode 100644 tests/data/multi_project/C/stuff.h create mode 100644 tests/data/multi_project/compare.xml create mode 100644 tests/data/multi_project/input.rst diff --git a/.gitignore b/.gitignore index e453ca57..51269156 100644 --- a/.gitignore +++ b/.gitignore @@ -50,5 +50,4 @@ examples/doxygen/example.tag examples/specific/dot_graphs/xml/dotfile.dot # generated in editable install -/breathe/_parser.pyi -/breathe/_parser.abi3.* +/breathe/_parser.py diff --git a/breathe/README.rst b/breathe/README.rst index 
9c5a4899..04689f47 100644 --- a/breathe/README.rst +++ b/breathe/README.rst @@ -6,9 +6,6 @@ breathe - **directive** - Contains some rst directive definitions. These were split out of `directives.py` when it started to become too large. - - **parser** - Contains code for parsing the doxygen xml into a tree of Python - objects. The vast majority of the code is autogenerated but there are now - small but significant tweaks which means we don't regenerate it. - **finder** - Provides classes for finding nodes within the set of xml files generated by doxygen. Finders are generally used in the `run` methods of the directives to find the xml node which is then passed to the renderer to @@ -23,6 +20,9 @@ breathe - **directives** - Contains the definitions of some of the directives. The rest are in the files in the `directive` folder. It also contains all the set up code which registers with Sphinx and wires together all the various factories. + - **parser** - Contains code for parsing the doxygen xml into a tree of Python + objects. Most of its content is imported from `_parser`, which is generated + automatically when Breathe is built. - **process** - Contains the code responsible for running the `doxygen` process when using the `autodoxygen` directives. - **project** - Handles the concept of a `Project` which is the breathe term for diff --git a/breathe/directives/function.py b/breathe/directives/function.py index f89830bf..88854ffe 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -259,25 +259,16 @@ def _create_function_signature( ) -> str: "Standard render process used by subclasses" - try: - object_renderer = SphinxRenderer( - self.dox_parser.app, - project_info, - [tn.value for tn in node_stack], - self.state, - self.state.document, - target_handler, - self.dox_parser, - filter_, - ) - except parser.ParserError as e: - return format_parser_error( - "doxygenclass", e.message, e.filename, self.state, self.lineno, True - ) - except parser.FileIOError as e: - return format_parser_error( - "doxygenclass", e.error, e.filename, self.state, self.lineno, False - ) + object_renderer = SphinxRenderer( + self.dox_parser.app, + project_info, + [tn.value for tn in node_stack], + self.state, + self.state.document, + target_handler, + self.dox_parser, + filter_, + ) context = RenderContext(node_stack, mask_factory, directive_args) node = node_stack[0].value diff --git a/breathe/parser.py b/breathe/parser.py index 6381db6b..50cc64ff 100644 --- a/breathe/parser.py +++ b/breathe/parser.py @@ -10,13 +10,11 @@ from sphinx.application import Sphinx -from typing import overload, TYPE_CHECKING, TypeVar +from typing import overload, TYPE_CHECKING if TYPE_CHECKING: NodeOrValue = Node | str | None -T_inv = TypeVar("T_inv") - @reprlib.recursive_repr() def node_repr(self: Node) -> str: # pragma: no cover @@ -129,14 +127,19 @@ def _parse_common(filename: str, right_tag: str) -> Node_DoxygenType | Node_Doxy raise FileIOError(str(e), filename) +class ProjectData(NamedTuple): + index: DoxygenIndex + compound_cache: dict[str, DoxygenCompound] + + class DoxygenParser: def __init__(self, app: Sphinx) -> None: self.app = app - self.compound_index: DoxygenIndex | None = None - self.compound_cache: dict[str, DoxygenCompound] = {} + self.parsed_data: dict[str, ProjectData] = {} - def parse_index(self, project_info: ProjectInfo) -> DoxygenIndex: - r: DoxygenIndex | None = self.compound_index + def _get_project_data(self, project_info: ProjectInfo) -> ProjectData: + key = project_info.project_path() 
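Keying the parsed data on project_path() gives each project its own DoxygenIndex and compound cache, so identical refids coming from different projects no longer shadow one another. A minimal standalone sketch of that keying (the names below are stand-ins for illustration, not Breathe's actual classes):

    # Stand-in sketch: parsed data is cached per XML output directory, so two
    # projects may define the same refid without sharing state.
    parsed_data = {}  # maps project XML dir -> (index, compound cache)

    def data_for(project_path):
        if project_path not in parsed_data:
            parsed_data[project_path] = (f"index for {project_path}", {})
        return parsed_data[project_path]

    assert data_for("xmlA") is data_for("xmlA")      # same project: cached
    assert data_for("xmlA") is not data_for("xmlB")  # different projects: separate caches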
+ r = self.parsed_data.get(key) if r is None: filename = path_handler.resolve_path(self.app, project_info.project_path(), "index.xml") @@ -144,13 +147,16 @@ def parse_index(self, project_info: ProjectInfo) -> DoxygenIndex: n = _parse_common(filename, "doxygenindex") assert isinstance(n, Node_DoxygenTypeIndex) - r = DoxygenIndex(n) - - self.compound_index = r + r = ProjectData(DoxygenIndex(n), {}) + self.parsed_data[key] = r return r + def parse_index(self, project_info: ProjectInfo) -> DoxygenIndex: + return self._get_project_data(project_info).index + def parse_compound(self, refid: str, project_info: ProjectInfo) -> DoxygenCompound: - r = self.compound_cache.get(refid) + cache = self._get_project_data(project_info).compound_cache + r = cache.get(refid) if r is None: filename = path_handler.resolve_path( self.app, project_info.project_path(), f"{refid}.xml" @@ -161,7 +167,7 @@ def parse_compound(self, refid: str, project_info: ProjectInfo) -> DoxygenCompou n = _parse_common(filename, "doxygen") assert isinstance(n, Node_DoxygenType) r = DoxygenCompound(n) - self.compound_cache[refid] = r + cache[refid] = r return r @@ -175,12 +181,7 @@ def tag_name_value(x: str) -> tuple[None, str]: ... -@overload def tag_name_value(x: TaggedValue[T_covar, U_covar] | str) -> tuple[T_covar | None, U_covar | str]: - ... - - -def tag_name_value(x): if isinstance(x, str): return None, x return x.name, x.value diff --git a/breathe/process.py b/breathe/process.py index 329541a2..e01d1d78 100644 --- a/breathe/process.py +++ b/breathe/process.py @@ -56,7 +56,7 @@ def generate_xml( doxygen_options: Mapping[str, str], doxygen_aliases: Mapping[str, str], ) -> None: - project_files: Mapping[str, ProjectData] = {} + project_files: dict[str, ProjectData] = {} # First collect together all the files which need to be doxygen processed for each project for project_name, file_structure in projects_source.items(): diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index 00839863..274b6a6e 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -539,16 +539,16 @@ def get_content(node: parser.Node_docParaType): ) -def get_parameterlists(node: parser.Node_docParaType): - return (value for name, value in map(parser.tag_name_value, node) if name == "parameterlist") +def get_parameterlists(node: parser.Node_docParaType) -> Iterable[parser.Node_docParamListType]: + return (value for name, value in map(parser.tag_name_value, node) if name == "parameterlist") # type: ignore -def get_simplesects(node: parser.Node_docParaType): - return (value for name, value in map(parser.tag_name_value, node) if name == "simplesect") +def get_simplesects(node: parser.Node_docParaType) -> Iterable[parser.Node_docSimpleSectType]: + return (value for name, value in map(parser.tag_name_value, node) if name == "simplesect") # type: ignore -def get_images(node: parser.Node_docParaType): - return (value for name, value in map(parser.tag_name_value, node) if name == "image") +def get_images(node: parser.Node_docParaType) -> Iterable[parser.Node_docImageType]: + return (value for name, value in map(parser.tag_name_value, node) if name == "image") # type: ignore class NodeHandler(Generic[T]): @@ -820,7 +820,7 @@ def run_directive( if self.context.child: signode.children = [ n for n in signode.children if n.tagname != "desc_addname" - ] # type: ignore + ] return nodes def handle_compounddef_declaration( diff --git a/scripts/doxygen_cache.py b/scripts/doxygen_cache.py index 453ceb67..d8ab6ef9 
100644 --- a/scripts/doxygen_cache.py +++ b/scripts/doxygen_cache.py @@ -14,53 +14,61 @@ CACHE_DIR = EXAMPLES_DIR / "_cache" +def run_one(p, name, template, exec): + print(f"generating output for {name}") + os.chdir(p) + out_dir = CACHE_DIR / name + out_dir.mkdir(exist_ok=True) + doxyfile = out_dir / "Doxyfile" + doxycontent = template.format(output=out_dir) + extra_opts = pathlib.Path("extra_dox_opts.txt") + if extra_opts.exists(): + doxycontent += extra_opts.read_text() + doxyfile.write_text(doxycontent) + + subprocess.run([exec, doxyfile], check=True) + def make_cache(): template = (EXAMPLES_DIR / "doxyfile_template").read_text() - exc = shutil.which("doxygen") - if exc is None: + exec = shutil.which("doxygen") + if exec is None: raise ValueError("cannot find doxygen executable") - + CACHE_DIR.mkdir(exist_ok=True) prev_dir = os.getcwd() r = subprocess.run( - [exc, "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True + [exec, "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True ) (CACHE_DIR / "version.txt").write_text(r.stdout) try: for p in EXAMPLES_DIR.glob("test_*"): - print(f"generating output for {p.name}") - os.chdir(p) - out_dir = CACHE_DIR / p.name - out_dir.mkdir(exist_ok=True) - doxyfile = out_dir / "Doxyfile" - doxycontent = template.format(output=out_dir) - extra_opts = pathlib.Path("extra_dox_opts.txt") - if extra_opts.exists(): - doxycontent += extra_opts.read_text() - doxyfile.write_text(doxycontent) - - subprocess.run([exc, doxyfile], check=True) - - print(f"generating output for auto") + run_one(p, p.name, template, exec) + + print("generating output for auto") os.chdir(DATA_DIR / "auto") out_dir = CACHE_DIR / "auto" out_dir.mkdir(exist_ok=True) doxyfile = out_dir / "Doxyfile" - doxyfile.write_text(AUTOCFG_TEMPLATE.format( - project_name="example", - output_dir=str(out_dir), - input='"auto_class.h" "auto_function.h"', - extra="" - )) - - subprocess.run([exc, doxyfile], check=True) + doxyfile.write_text( + AUTOCFG_TEMPLATE.format( + project_name="example", + output_dir=str(out_dir), + input='"auto_class.h" "auto_function.h"', + extra="", + ) + ) + + subprocess.run([exec, doxyfile], check=True) + + for c in "AB": + run_one(DATA_DIR / "multi_project" / c, f"multi_project.{c}", template, exec) finally: os.chdir(prev_dir) if __name__ == "__main__": - make_cache() \ No newline at end of file + make_cache() diff --git a/tests/data/examples/test_group/compare-1.10.0.xml b/tests/data/examples/test_group/compare-1.10.0.xml new file mode 100644 index 00000000..7270458d --- /dev/null +++ b/tests/data/examples/test_group/compare-1.10.0.xml @@ -0,0 +1,73 @@ + + + + + group mygroup + + This is the first group + + Functions + + + void groupedFunction + + This function is in MyGroup. + + + + + + class GroupedClassTest + + first class inside of namespace + + Public Functions + + + virtual void publicFunction const = 0 + + namespaced class function + + + + + + class PublicClass + + A protected class. 
+ + + + + class UndocumentedPublicClass + + + + + + + + + group innergroup + + This is an inner group + + + class InnerGroupClassTest + + inner class inside of namespace + + Public Functions + + + inline void function + + inner namespaced class function + + + + + + + + diff --git a/tests/data/multi_project/A/stuff.h b/tests/data/multi_project/A/stuff.h new file mode 100644 index 00000000..7fe2e75d --- /dev/null +++ b/tests/data/multi_project/A/stuff.h @@ -0,0 +1,16 @@ + +/** + * Doc for fun1 in project A + */ +void fun1(); + +/** + * Doc for fun2 in project A + */ +void fun2(); + +/** + * Unique function for project A + */ +void funA(); + diff --git a/tests/data/multi_project/B/stuff.h b/tests/data/multi_project/B/stuff.h new file mode 100644 index 00000000..7a9486ca --- /dev/null +++ b/tests/data/multi_project/B/stuff.h @@ -0,0 +1,16 @@ + +/** + * Doc for fun1 in project B + */ +void fun1(); + +/** + * Doc for fun2 in project B + */ +void fun2(); + +/** + * Unique function for project B + */ +void funB(); + diff --git a/tests/data/multi_project/C/stuff.h b/tests/data/multi_project/C/stuff.h new file mode 100644 index 00000000..43874150 --- /dev/null +++ b/tests/data/multi_project/C/stuff.h @@ -0,0 +1,16 @@ + +/** + * Doc for fun1 in project C + */ +void fun1(); + +/** + * Doc for fun2 in project C + */ +void fun2(); + +/** + * Unique function for project C + */ +void funC(); + diff --git a/tests/data/multi_project/compare.xml b/tests/data/multi_project/compare.xml new file mode 100644 index 00000000..110ba679 --- /dev/null +++ b/tests/data/multi_project/compare.xml @@ -0,0 +1,67 @@ + + + + + + void fun1 + + Doc for fun1 in project A. + + + + + void fun1 + + Doc for fun1 in project B. + + + + + void fun1 + + Doc for fun1 in project C. + + + + + void fun2 + + Doc for fun2 in project A. + + + + + void fun2 + + Doc for fun2 in project B. + + + + + void fun2 + + Doc for fun2 in project C. + + + + + void funA + + Unique function for project A. + + + + + void funB + + Unique function for project B. + + + + + void funC + + Unique function for project C. + + + diff --git a/tests/data/multi_project/input.rst b/tests/data/multi_project/input.rst new file mode 100644 index 00000000..494e0e3f --- /dev/null +++ b/tests/data/multi_project/input.rst @@ -0,0 +1,27 @@ +.. doxygenfunction:: fun1 + :project: A + +.. doxygenfunction:: fun1 + :project: B + +.. doxygenfunction:: fun1 + :path: {project_c_path} + +.. doxygenfunction:: fun2 + :project: A + +.. doxygenfunction:: fun2 + :project: B + +.. doxygenfunction:: fun2 + :path: {project_c_path} + +.. doxygenfunction:: funA + :project: A + +.. doxygenfunction:: funB + :project: B + +.. 
doxygenfunction:: funC + :path: {project_c_path} + diff --git a/tests/test_examples.py b/tests/test_examples.py index 9cc1f0a5..4151d85c 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -185,15 +185,14 @@ def str_to_version(v_str): def versioned_model(p): - fname = str(p) - return VersionedFile(fname, str_to_version(fname[len("compare-") : -len(".xml")])) + return VersionedFile(str(p), str_to_version(str(p.name)[len("compare-") : -len(".xml")])) -def compare_xml(generated, version): - alt_models = list(map(versioned_model, pathlib.Path(".").glob("compare-*.xml"))) +def compare_xml(generated, input_dir, version): + alt_models = list(map(versioned_model, input_dir.glob("compare-*.xml"))) alt_models.sort(key=(lambda f: f.version), reverse=True) - model = "compare.xml" + model = input_dir / "compare.xml" for alt_m in alt_models: if version >= alt_m.version: model = alt_m.file @@ -264,36 +263,50 @@ def doxygen(doxygen_cache): ) -@pytest.mark.parametrize("test_input", get_individual_tests()) -def test_example(make_app, tmp_path, test_input, monkeypatch, doxygen, doxygen_cache): - monkeypatch.chdir(test_input) - +def run_doxygen_with_template(doxygen, tmp_path, cache, example_name, output_name): doxyfile = tmp_path / "Doxyfile" doxycontent = doxygen.template.format(output=tmp_path) extra_opts = pathlib.Path("extra_dox_opts.txt") if extra_opts.exists(): doxycontent += extra_opts.read_text() doxyfile.write_text(doxycontent) - (tmp_path / "conf.py").touch() - shutil.copyfile("input.rst", tmp_path / "index.rst") - if doxygen_cache is not None: + if cache is not None: # instead of passing a different path to breathe_projects.example, the # folder is copied to the same place it would be without caching so that # all paths in the generated output remain the same - shutil.copytree( - doxygen_cache / test_input.name / "xml", - tmp_path / "xml") + shutil.copytree(cache / example_name / output_name, tmp_path / output_name) else: subprocess.run([doxygen.file, doxyfile], check=True) + if output_name != "xml": + os.rename(tmp_path / "xml", tmp_path / output_name) + + +def run_sphinx_and_compare(make_app, tmp_path, test_input, overrides, version): + (tmp_path / "conf.py").touch() + shutil.copyfile(test_input / "input.rst", tmp_path / "index.rst") make_app( buildername="xml", srcdir=sphinx_path(tmp_path), - confoverrides=conf_overrides({"breathe_projects": {"example": str(tmp_path / "xml")}}), + confoverrides=conf_overrides(overrides), ).build() - compare_xml(tmp_path / "_build" / "xml" / "index.xml", doxygen.version) + compare_xml(tmp_path / "_build" / "xml" / "index.xml", test_input, version) + + +@pytest.mark.parametrize("test_input", get_individual_tests()) +def test_example(make_app, tmp_path, test_input, monkeypatch, doxygen, doxygen_cache): + monkeypatch.chdir(test_input) + + run_doxygen_with_template(doxygen, tmp_path, doxygen_cache, test_input.name, "xml") + run_sphinx_and_compare( + make_app, + tmp_path, + test_input, + {"breathe_projects": {"example": str(tmp_path / "xml")}}, + doxygen.version, + ) def test_auto(make_app, tmp_path, monkeypatch, doxygen, doxygen_cache): @@ -304,19 +317,37 @@ def test_auto(make_app, tmp_path, monkeypatch, doxygen, doxygen_cache): xml_path = str(doxygen_cache / "auto" / "xml") monkeypatch.setattr(AutoDoxygenProcessHandle, "process", (lambda *args, **kwds: xml_path)) + run_sphinx_and_compare( + make_app, + tmp_path, + test_input, + { + "breathe_projects_source": { + "example": (str(test_input.absolute()), ["auto_class.h", "auto_function.h"]) + } + }, + 
doxygen.version, + ) + + +def test_multiple_projects(make_app, tmp_path, monkeypatch, doxygen, doxygen_cache): + test_input = TEST_DATA_DIR / "multi_project" + + for c in "ABC": + monkeypatch.chdir(test_input / c) + run_doxygen_with_template(doxygen, tmp_path, doxygen_cache, f"multi_project.{c}", f"xml{c}") + (tmp_path / "conf.py").touch() - shutil.copyfile("input.rst", tmp_path / "index.rst") + (tmp_path / "index.rst").write_text( + (test_input / "input.rst").read_text().format(project_c_path=str(tmp_path / "xmlC")) + ) make_app( buildername="xml", srcdir=sphinx_path(tmp_path), confoverrides=conf_overrides( - { - "breathe_projects_source": { - "example": (str(test_input.absolute()), ["auto_class.h", "auto_function.h"]) - } - } + {"breathe_projects": {"A": str(tmp_path / "xmlA"), "B": str(tmp_path / "xmlB")}} ), ).build() - compare_xml(tmp_path / "_build" / "xml" / "index.xml", doxygen.version) + compare_xml(tmp_path / "_build" / "xml" / "index.xml", test_input, doxygen.version) diff --git a/tests/test_parser.py b/tests/test_parser.py index 01c55476..56ba7777 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -69,6 +69,7 @@ def test_unknown_tag(): assert len(record) == 1 assert "Warning on line 5:" in str(record[0].message) + def test_string_coalesce(): xml = """ diff --git a/xml_parser_generator/make_parser.py b/xml_parser_generator/make_parser.py index a9907e0b..7e6f3c5e 100644 --- a/xml_parser_generator/make_parser.py +++ b/xml_parser_generator/make_parser.py @@ -4,7 +4,6 @@ from __future__ import annotations import re -import sys import json import enum import dataclasses @@ -15,7 +14,6 @@ from typing import Any, Callable, cast, Literal, NamedTuple, NoReturn, TYPE_CHECKING, TypeVar import jinja2 -#import perfect_hash if TYPE_CHECKING: from collections.abc import Iterable, Sequence @@ -542,7 +540,7 @@ class HashData(NamedTuple): g: list[int] -#def generate_hash(items: list[str]) -> HashData: +# def generate_hash(items: list[str]) -> HashData: # try: # f1, f2, g = perfect_hash.generate_hash(items) # return HashData(f1.salt, f2.salt, g) @@ -579,7 +577,7 @@ def field_count(t) -> int: return len(t.attributes) + len(t.children) + sum(cast(int, field_count(b)) for b in t.bases) for t in schema.types.values(): - #if isinstance(t, SchemaEnum): + # if isinstance(t, SchemaEnum): # if len(t.children) >= HASH_LOOKUP_THRESHOLD: # t.hash = generate_hash([item.xml for item in t.children]) if isinstance(t, SchemaCharEnum): @@ -790,9 +788,9 @@ def __call__(self): "element_names": elements, "attribute_names": attributes, "py_field_names": py_field_names, - #"e_hash": generate_hash(elements), - #"a_hash": generate_hash(attributes), - #"py_f_hash": generate_hash(py_field_names), + # "e_hash": generate_hash(elements), + # "a_hash": generate_hash(attributes), + # "py_f_hash": generate_hash(py_field_names), "union_tag_names": sorted(tag_names), "char_enum_chars": {c: i for i, c in enumerate(sorted(char_enum_chars))}, "list_element_field_counts": list(list_element_field_counts), diff --git a/xml_parser_generator/module_template.py.in b/xml_parser_generator/module_template.py.in index 06acf4b4..063caac2 100644 --- a/xml_parser_generator/module_template.py.in +++ b/xml_parser_generator/module_template.py.in @@ -1,3 +1,5 @@ +# flake8: noqa + """ Python module to parse Doxygen's XML output. 
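This template is rendered to breathe/_parser.py at build time (see the setuptools and Makefile changes elsewhere in this series). A rough usage sketch, assuming the module has already been generated: parse_str()/parse_file() return the typed root node, and malformed or unrecognized input surfaces as ParseError rather than a bare expat error.

    # Sketch: driving the generated parser directly. ParseError carries the
    # message and (possibly None) line number; its __str__ formats both.
    from breathe import _parser

    try:
        _parser.parse_str("<not-a-doxygen-root/>")
    except _parser.ParseError as e:
        print(e)  # e.g. "Error: document without a recognized root element"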
@@ -308,8 +310,8 @@ class _ParseCallbacks: class _ParseState: - def __init__(self, parser, /): - self.parser: expat.XMLParserType = parser + def __init__(self, parser: expat.XMLParserType, /): + self.parser = parser self.parse_callbacks: list[_ParseCallbacks] = [] # While this is greater than zero all XML content is ignored. @@ -397,7 +399,7 @@ def _push_tuple_item( obj[-1] = cls._make(obj[-1]) # tuples are immutable so a list is used while collecting the values - new_tuple = [] + new_tuple: list[Any] = [] obj.append(new_tuple) return new_tuple.append @@ -509,6 +511,7 @@ def _union_codepoint_element(c): _node_start_const_char(state, attr) return inner +_cur_list: dict[str, Callable] def _add_to_list(name): def inner(f): global _cur_list diff --git a/xml_parser_generator/schema.json b/xml_parser_generator/schema.json index f6cdb208..ba2a7e40 100644 --- a/xml_parser_generator/schema.json +++ b/xml_parser_generator/schema.json @@ -572,7 +572,8 @@ } }, "reimplementType": { - "kind": "union_list_element","allow_text": true, + "kind": "union_list_element", + "allow_text": true, "attributes": { "refid": {"type": "#string"} } diff --git a/xml_parser_generator/setuptools_builder.py b/xml_parser_generator/setuptools_builder.py index 7bbf611f..dd3f3b2e 100644 --- a/xml_parser_generator/setuptools_builder.py +++ b/xml_parser_generator/setuptools_builder.py @@ -2,12 +2,12 @@ import os.path -#try: -# from setuptools.command.build import build -#except ImportError: -# from distutils.command.build import build +# try: +# from setuptools.command.build import build +# except ImportError: +# from distutils.command.build import build -#from setuptools.command.build_ext import build_ext +# from setuptools.command.build_ext import build_ext from setuptools.command.build_py import build_py try: @@ -17,108 +17,109 @@ from distutils import log from distutils.dir_util import mkpath -#from distutils.util import split_quoted + +# from distutils.util import split_quoted import make_parser -#extra_user_options = [ -# ("cpp-opts=", None, "extra command line arguments for the compiler"), -# ("ld-opts=", None, "extra command line arguments for the linker"), -#] +# extra_user_options = [ +# ("cpp-opts=", None, "extra command line arguments for the compiler"), +# ("ld-opts=", None, "extra command line arguments for the linker"), +# ] -#class CustomBuild(build): -# """Add extra parameters for 'build' to pass to 'build_ext'""" +# class CustomBuild(build): +# """Add extra parameters for 'build' to pass to 'build_ext'""" # -# user_options = build.user_options + extra_user_options +# user_options = build.user_options + extra_user_options # -# def initialize_options(self): -# super().initialize_options() -# self.cpp_opts = "" -# self.ld_opts = "" +# def initialize_options(self): +# super().initialize_options() +# self.cpp_opts = "" +# self.ld_opts = "" # -# def finalize_options(self): -# super().finalize_options() -# self.cpp_opts = split_quoted(self.cpp_opts) -# self.ld_opts = split_quoted(self.ld_opts) +# def finalize_options(self): +# super().finalize_options() +# self.cpp_opts = split_quoted(self.cpp_opts) +# self.ld_opts = split_quoted(self.ld_opts) -#class CustomBuildExt(build_ext): -# """Extend build_ext to automatically generate the parser module""" +# class CustomBuildExt(build_ext): +# """Extend build_ext to automatically generate the parser module""" # -# user_options = build_ext.user_options + extra_user_options +# user_options = build_ext.user_options + extra_user_options # -# SCHEMA_FILE = 
os.path.join("xml_parser_generator", "schema.json") -# MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.c.in") -# PY_MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.py.in") -# STUBS_TEMPLATE = os.path.join("xml_parser_generator", "stubs_template.pyi.in") -# MAKER_SOURCE = os.path.join("xml_parser_generator", "make_parser.py") +# SCHEMA_FILE = os.path.join("xml_parser_generator", "schema.json") +# MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.c.in") +# PY_MODULE_TEMPLATE = os.path.join("xml_parser_generator", "module_template.py.in") +# STUBS_TEMPLATE = os.path.join("xml_parser_generator", "stubs_template.pyi.in") +# MAKER_SOURCE = os.path.join("xml_parser_generator", "make_parser.py") # -# M_DEPENDENCIES = [SCHEMA_FILE, MODULE_TEMPLATE, MAKER_SOURCE] -# PY_M_DEPENDENCIES = [SCHEMA_FILE, PY_MODULE_TEMPLATE, MAKER_SOURCE] -# S_DEPENDENCIES = [SCHEMA_FILE, STUBS_TEMPLATE, MAKER_SOURCE] +# M_DEPENDENCIES = [SCHEMA_FILE, MODULE_TEMPLATE, MAKER_SOURCE] +# PY_M_DEPENDENCIES = [SCHEMA_FILE, PY_MODULE_TEMPLATE, MAKER_SOURCE] +# S_DEPENDENCIES = [SCHEMA_FILE, STUBS_TEMPLATE, MAKER_SOURCE] # -# def initialize_options(self): -# super().initialize_options() -# self.cpp_opts = None -# self.ld_opts = None +# def initialize_options(self): +# super().initialize_options() +# self.cpp_opts = None +# self.ld_opts = None # -# def finalize_options(self): -# if self.cpp_opts is not None: -# self.cpp_opts = split_quoted(self.cpp_opts) -# if self.ld_opts is not None: -# self.ld_opts = split_quoted(self.ld_opts) +# def finalize_options(self): +# if self.cpp_opts is not None: +# self.cpp_opts = split_quoted(self.cpp_opts) +# if self.ld_opts is not None: +# self.ld_opts = split_quoted(self.ld_opts) # -# self.set_undefined_options("build", ("cpp_opts", "cpp_opts"), ("ld_opts", "ld_opts")) -# super().finalize_options() +# self.set_undefined_options("build", ("cpp_opts", "cpp_opts"), ("ld_opts", "ld_opts")) +# super().finalize_options() # -# def build_extensions(self): -# assert len(self.extensions) == 1 +# def build_extensions(self): +# assert len(self.extensions) == 1 # -# if not self.debug: -# # The parser doesn't do any complicated calculation; its speed will -# # mostly depend on file read and memory allocation speed. Thus it's -# # better to optimize for size. -# c = self.compiler.compiler_type -# if c == "msvc": -# self.extensions[0].extra_compile_args = ["/O1"] -# elif c in {"unix", "cygwin", "mingw32"}: -# self.extensions[0].extra_compile_args = ["-Os"] -# self.extensions[0].extra_link_args = ["-s"] +# if not self.debug: +# # The parser doesn't do any complicated calculation; its speed will +# # mostly depend on file read and memory allocation speed. Thus it's +# # better to optimize for size. 
+# c = self.compiler.compiler_type +# if c == "msvc": +# self.extensions[0].extra_compile_args = ["/O1"] +# elif c in {"unix", "cygwin", "mingw32"}: +# self.extensions[0].extra_compile_args = ["-Os"] +# self.extensions[0].extra_link_args = ["-s"] # -# source = os.path.join(self.build_temp, self.extensions[0].name + ".c") +# source = os.path.join(self.build_temp, self.extensions[0].name + ".c") # -# # put the stub and Python file in the same place that the extension -# # module will be -# ext_dest = self.get_ext_fullpath(self.extensions[0].name) -# libdir = os.path.dirname(ext_dest) -# stub = os.path.join(libdir, self.extensions[0].name + ".pyi") -# py_source = os.path.join(libdir, self.extensions[0].name + "_py.py") +# # put the stub and Python file in the same place that the extension +# # module will be +# ext_dest = self.get_ext_fullpath(self.extensions[0].name) +# libdir = os.path.dirname(ext_dest) +# stub = os.path.join(libdir, self.extensions[0].name + ".pyi") +# py_source = os.path.join(libdir, self.extensions[0].name + "_py.py") # -# mkpath(self.build_temp, dry_run=self.dry_run) -# mkpath(libdir, dry_run=self.dry_run) +# mkpath(self.build_temp, dry_run=self.dry_run) +# mkpath(libdir, dry_run=self.dry_run) # -# regen = [] -# for dep, out, tmpl in ( -# (self.M_DEPENDENCIES, source, self.MODULE_TEMPLATE), -# (self.PY_M_DEPENDENCIES, py_source, self.PY_MODULE_TEMPLATE), -# (self.S_DEPENDENCIES, stub, self.STUBS_TEMPLATE), -# ): -# if self.force or newer_group(dep, out): -# regen.append((tmpl, out)) +# regen = [] +# for dep, out, tmpl in ( +# (self.M_DEPENDENCIES, source, self.MODULE_TEMPLATE), +# (self.PY_M_DEPENDENCIES, py_source, self.PY_MODULE_TEMPLATE), +# (self.S_DEPENDENCIES, stub, self.STUBS_TEMPLATE), +# ): +# if self.force or newer_group(dep, out): +# regen.append((tmpl, out)) # -# if regen: -# log.info("generating module source from templates") -# if not self.dry_run: -# make_parser.generate_from_json(self.SCHEMA_FILE, regen) -# else: -# log.debug(f'"{source}", "{py_source}" and "{stub}" are up-to-date') +# if regen: +# log.info("generating module source from templates") +# if not self.dry_run: +# make_parser.generate_from_json(self.SCHEMA_FILE, regen) +# else: +# log.debug(f'"{source}", "{py_source}" and "{stub}" are up-to-date') # -# self.extensions[0].sources.append(source) +# self.extensions[0].sources.append(source) # -# super().build_extensions() +# super().build_extensions() class CustomBuildPy(build_py): @@ -141,7 +142,7 @@ def make_parser(self): make_parser.generate_from_json(self.SCHEMA_FILE, [(self.PY_MODULE_TEMPLATE, dest)]) else: log.debug(f'"{dest}" is up-to-date') - + def run(self): super().run() self.make_parser() From fab44adaeb966bdb2ad888fabef7b64dfd8f6245 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Mon, 22 Jan 2024 02:06:03 -0500 Subject: [PATCH 59/65] Now compatible with in-development sphinx version 7.3 --- breathe/directives/function.py | 22 +++++++++++++--------- breathe/directives/setup.py | 20 ++++++++++---------- breathe/parser.py | 6 +++--- breathe/renderer/sphinxrenderer.py | 13 +++++++------ scripts/doxygen_cache.py | 1 + 5 files changed, 34 insertions(+), 28 deletions(-) diff --git a/breathe/directives/function.py b/breathe/directives/function.py index 88854ffe..71a4ae35 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -1,24 +1,28 @@ from __future__ import annotations +import re from breathe.directives import BaseDirective from breathe.exception import BreatheError from breathe.file_state_cache 
import MTimeError from breathe import parser from breathe.project import ProjectError -from breathe.renderer import format_parser_error, RenderContext, mask, TaggedNode, filter +from breathe.renderer import RenderContext, mask, TaggedNode, filter from breathe.renderer.sphinxrenderer import WithContext from breathe.renderer.sphinxrenderer import SphinxRenderer from breathe.renderer.target import create_target_handler from docutils.parsers.rst.directives import unchanged_required, flag - +from docutils import nodes from sphinx.domains import cpp -from docutils import nodes +from typing import Any, cast, List, Optional, TYPE_CHECKING -import re +cppast: Any +try: + from sphinx.domains.cpp import _ast as cppast +except ImportError: + cppast = cpp -from typing import cast, List, Optional, TYPE_CHECKING if TYPE_CHECKING: import sys @@ -205,7 +209,7 @@ def run(self) -> List[Node]: self.directive_args, ) - def _parse_args(self, function_description: str) -> Optional[cpp.ASTParametersQualifiers]: + def _parse_args(self, function_description: str) -> Optional[cppast.ASTParametersQualifiers]: # Note: the caller must catch cpp.DefinitionError if function_description == "": return None @@ -233,11 +237,11 @@ def stripParamQual(paramQual): def stripDeclarator(declarator): if hasattr(declarator, "next"): stripDeclarator(declarator.next) - if isinstance(declarator, cpp.ASTDeclaratorParen): + if isinstance(declarator, cppast.ASTDeclaratorParen): assert hasattr(declarator, "inner") stripDeclarator(declarator.inner) else: - assert isinstance(declarator, cpp.ASTDeclaratorNameParamQual) + assert isinstance(declarator, cppast.ASTDeclaratorNameParamQual) assert hasattr(declarator, "declId") declarator.declId = None if declarator.paramQual is not None: @@ -294,7 +298,7 @@ def _create_function_signature( def _resolve_function( self, matches: list[filter.FinderMatch], - args: cpp.ASTParametersQualifiers | None, + args: cppast.ASTParametersQualifiers | None, project_info: project.ProjectInfo, ): if not matches: diff --git a/breathe/directives/setup.py b/breathe/directives/setup.py index b2ec6a96..5c01f0da 100644 --- a/breathe/directives/setup.py +++ b/breathe/directives/setup.py @@ -73,22 +73,22 @@ def set_temp_data( for name, directive in directives.items(): app.add_directive(name, directive) - app.add_config_value("breathe_projects", {}, True) # Dict[str, str] - app.add_config_value("breathe_default_project", "", True) # str + app.add_config_value("breathe_projects", {}, "env") # Dict[str, str] + app.add_config_value("breathe_default_project", "", "env") # str # Provide reasonable defaults for domain_by_extension mapping. Can be overridden by users. 
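The third argument to add_config_value is the rebuild condition. Sphinx has long accepted True as shorthand for "env" and False for "", so spelling the string out keeps the behaviour unchanged while avoiding the boolean form, which is presumably what tripped up the in-development Sphinx 7.3 this patch targets. A minimal registration sketch outside of Breathe (the extension and option names are illustrative only):

    from sphinx.application import Sphinx

    def setup(app: Sphinx) -> dict:
        # "env" rebuilds the environment (doctrees) when the value changes;
        # "" means no rebuild, "html" rebuilds only the rendered output.
        app.add_config_value("example_option", {}, "env")
        return {"parallel_read_safe": True}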
app.add_config_value( - "breathe_domain_by_extension", {"py": "py", "cs": "cs"}, True + "breathe_domain_by_extension", {"py": "py", "cs": "cs"}, "env" ) # Dict[str, str] - app.add_config_value("breathe_domain_by_file_pattern", {}, True) # Dict[str, str] - app.add_config_value("breathe_projects_source", {}, True) - app.add_config_value("breathe_build_directory", "", True) - app.add_config_value("breathe_default_members", (), True) + app.add_config_value("breathe_domain_by_file_pattern", {}, "env") # Dict[str, str] + app.add_config_value("breathe_projects_source", {}, "env") + app.add_config_value("breathe_build_directory", "", "env") + app.add_config_value("breathe_default_members", (), "env") app.add_config_value("breathe_show_define_initializer", False, "env") app.add_config_value("breathe_show_enumvalue_initializer", False, "env") app.add_config_value("breathe_show_include", True, "env") - app.add_config_value("breathe_implementation_filename_extensions", [".c", ".cc", ".cpp"], True) - app.add_config_value("breathe_doxygen_config_options", {}, True) - app.add_config_value("breathe_doxygen_aliases", {}, True) + app.add_config_value("breathe_implementation_filename_extensions", [".c", ".cc", ".cpp"], "env") + app.add_config_value("breathe_doxygen_config_options", {}, "env") + app.add_config_value("breathe_doxygen_aliases", {}, "env") app.add_config_value("breathe_use_project_refids", False, "env") app.add_config_value("breathe_order_parameters_first", False, "env") app.add_config_value("breathe_separate_member_pages", False, "env") diff --git a/breathe/parser.py b/breathe/parser.py index 50cc64ff..89059592 100644 --- a/breathe/parser.py +++ b/breathe/parser.py @@ -10,7 +10,7 @@ from sphinx.application import Sphinx -from typing import overload, TYPE_CHECKING +from typing import overload, NamedTuple, TYPE_CHECKING if TYPE_CHECKING: NodeOrValue = Node | str | None @@ -128,7 +128,7 @@ def _parse_common(filename: str, right_tag: str) -> Node_DoxygenType | Node_Doxy class ProjectData(NamedTuple): - index: DoxygenIndex + d_index: DoxygenIndex compound_cache: dict[str, DoxygenCompound] @@ -152,7 +152,7 @@ def _get_project_data(self, project_info: ProjectInfo) -> ProjectData: return r def parse_index(self, project_info: ProjectInfo) -> DoxygenIndex: - return self._get_project_data(project_info).index + return self._get_project_data(project_info).d_index def parse_compound(self, refid: str, project_info: ProjectInfo) -> DoxygenCompound: cache = self._get_project_data(project_info).compound_cache diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index 274b6a6e..af64ad28 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -540,15 +540,18 @@ def get_content(node: parser.Node_docParaType): def get_parameterlists(node: parser.Node_docParaType) -> Iterable[parser.Node_docParamListType]: - return (value for name, value in map(parser.tag_name_value, node) if name == "parameterlist") # type: ignore + pairs = map(parser.tag_name_value, node) # type: ignore + return (value for name, value in pairs if name == "parameterlist") # type: ignore def get_simplesects(node: parser.Node_docParaType) -> Iterable[parser.Node_docSimpleSectType]: - return (value for name, value in map(parser.tag_name_value, node) if name == "simplesect") # type: ignore + pairs = map(parser.tag_name_value, node) # type: ignore + return (value for name, value in pairs if name == "simplesect") # type: ignore def get_images(node: parser.Node_docParaType) -> 
Iterable[parser.Node_docImageType]: - return (value for name, value in map(parser.tag_name_value, node) if name == "image") # type: ignore + pairs = map(parser.tag_name_value, node) # type: ignore + return (value for name, value in pairs if name == "image") # type: ignore class NodeHandler(Generic[T]): @@ -818,9 +821,7 @@ def run_directive( signode = finder.declarator if self.context.child: - signode.children = [ - n for n in signode.children if n.tagname != "desc_addname" - ] + signode.children = [n for n in signode.children if n.tagname != "desc_addname"] return nodes def handle_compounddef_declaration( diff --git a/scripts/doxygen_cache.py b/scripts/doxygen_cache.py index d8ab6ef9..0a6bdbb2 100644 --- a/scripts/doxygen_cache.py +++ b/scripts/doxygen_cache.py @@ -28,6 +28,7 @@ def run_one(p, name, template, exec): subprocess.run([exec, doxyfile], check=True) + def make_cache(): template = (EXAMPLES_DIR / "doxyfile_template").read_text() From c8fb596cd80eadfc3a7ea158da6a972b50171ebf Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Mon, 22 Jan 2024 19:27:25 -0500 Subject: [PATCH 60/65] Updated makefile and version check script --- Makefile | 13 +++++++++++-- breathe/__init__.py | 2 +- scripts/version-check.py | 11 ++++++----- xml_parser_generator/setuptools_builder.py | 6 ++++++ 4 files changed, 24 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 8acedb7d..08f64c52 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,5 @@ +RM = rm -f +GENERATED_MOD = breathe/_parser.py .PHONY: all all: html pdf @@ -16,9 +18,16 @@ data: $(MAKE) -C examples/tinyxml all $(MAKE) -C examples/specific all +$(GENERATED_MOD): \ + xml_parser_generator/schema.json \ + xml_parser_generator/module_template.py.in \ + xml_parser_generator/make_parser.py + python3 xml_parser_generator/setuptools_builder.py + .PHONY: distclean distclean: clean $(MAKE) -C documentation clean + $(RM) $(GENERATED_MOD) .PHONY: clean clean: @@ -31,7 +40,7 @@ test: cd tests && python3 -m pytest -v .PHONY: dev-test -dev-test: +dev-test: $(GENERATED_MOD) cd tests && PYTHONPATH=../:$(PYTHONPATH) python3 -m pytest -v .PHONY: flake8 @@ -43,7 +52,7 @@ black: black --check . 
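For reference, the python3 xml_parser_generator/setuptools_builder.py command behind the $(GENERATED_MOD) rule reduces to roughly the following (it relies on the __main__ hook added to setuptools_builder.py later in this patch and assumes it is run from the repository root):

    # Sketch: render the Python parser template against the schema and write
    # breathe/_parser.py, the file the $(GENERATED_MOD) targets depend on.
    import sys

    sys.path.insert(0, "xml_parser_generator")  # make_parser.py lives here
    import make_parser

    make_parser.generate_from_json(
        "xml_parser_generator/schema.json",
        [("xml_parser_generator/module_template.py.in", "breathe/_parser.py")],
    )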
.PHONY: type-check -type-check: +type-check: $(GENERATED_MOD) mypy --warn-redundant-casts --warn-unused-ignores breathe tests .PHONY: version-check diff --git a/breathe/__init__.py b/breathe/__init__.py index feefadfc..d17bff65 100644 --- a/breathe/__init__.py +++ b/breathe/__init__.py @@ -1,6 +1,6 @@ from sphinx.application import Sphinx -# Keep in sync with setup.py __version__ +# Keep in sync with pyproject.toml "version" __version__ = "4.35.0" diff --git a/scripts/version-check.py b/scripts/version-check.py index 2ad59ccb..0b8d5632 100644 --- a/scripts/version-check.py +++ b/scripts/version-check.py @@ -9,12 +9,13 @@ import breathe setup_version = "" -with open("setup.py") as setup: - for line in setup: - if line.startswith("__version__"): - match = re.search('"(?P[^"]*)"', line) +with open("pyproject.toml") as project: + for line in project: + if line.startswith("version"): + match = re.search('"([^"]*)"', line) if match: - setup_version = match.group("version") + setup_version = match.group(1) + break if setup_version == breathe.__version__: print("Versions match") diff --git a/xml_parser_generator/setuptools_builder.py b/xml_parser_generator/setuptools_builder.py index dd3f3b2e..4def7e8c 100644 --- a/xml_parser_generator/setuptools_builder.py +++ b/xml_parser_generator/setuptools_builder.py @@ -146,3 +146,9 @@ def make_parser(self): def run(self): super().run() self.make_parser() + + +if __name__ == "__main__": + make_parser.generate_from_json( + CustomBuildPy.SCHEMA_FILE, [(CustomBuildPy.PY_MODULE_TEMPLATE, CustomBuildPy.PARSER_DEST)] + ) From 5465c9d64c3969285d98a3e93b66eb13fd4679e0 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Tue, 23 Jan 2024 18:47:07 -0500 Subject: [PATCH 61/65] fixed/silenced mypy errors --- breathe/directives/function.py | 12 +++++++----- breathe/renderer/mask.py | 2 +- breathe/renderer/sphinxrenderer.py | 9 +++++++-- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/breathe/directives/function.py b/breathe/directives/function.py index 71a4ae35..b60392cd 100644 --- a/breathe/directives/function.py +++ b/breathe/directives/function.py @@ -17,11 +17,13 @@ from typing import Any, cast, List, Optional, TYPE_CHECKING -cppast: Any -try: - from sphinx.domains.cpp import _ast as cppast -except ImportError: - cppast = cpp +if TYPE_CHECKING: + cppast: Any +else: + try: + from sphinx.domains.cpp import _ast as cppast + except ImportError: + cppast = cpp if TYPE_CHECKING: diff --git a/breathe/renderer/mask.py b/breathe/renderer/mask.py index 1680087a..24d75d06 100644 --- a/breathe/renderer/mask.py +++ b/breathe/renderer/mask.py @@ -24,7 +24,7 @@ from breathe import parser -def no_parameter_names(node: parser.NodeOrValue): +def no_parameter_names(node: parser.NodeOrValue) -> parser.Node_paramType: assert isinstance(node, parser.Node_paramType) return parser.Node_paramType( array=node.array, diff --git a/breathe/renderer/sphinxrenderer.py b/breathe/renderer/sphinxrenderer.py index af64ad28..556d614f 100644 --- a/breathe/renderer/sphinxrenderer.py +++ b/breathe/renderer/sphinxrenderer.py @@ -818,7 +818,9 @@ def run_directive( rst_node.walk(finder) assert finder.declarator - signode = finder.declarator + # the type is set to "Any" to get around missing typing info in + # docutils 0.20.1 + signode: Any = finder.declarator if self.context.child: signode.children = [n for n in signode.children if n.tagname != "desc_addname"] @@ -2446,7 +2448,10 @@ def visit_function(self, node: parser.Node_memberdefType) -> list[nodes.Node]: if not 
self.app.env.config.breathe_debug_trace_doxygen_ids: target = self.create_doxygen_target(node) assert target is not None - rst_node.children[0].insert(0, target) + + # the type is cast to "Any" to get around missing typing info in + # docutils 0.20.1 + cast(Any, rst_node.children[0]).insert(0, target) finder.content.extend(self.description(node)) return nodes_ From 5113947ba197933beb24d67d25cfcdf7ca134b1a Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sat, 10 Feb 2024 18:36:06 -0500 Subject: [PATCH 62/65] Added Doxygen 1.9.7 to unit test action matrix --- .github/workflows/cache_doxygen.yml | 13 ++++++------- .github/workflows/documentation.yml | 7 ++++--- .github/workflows/unit_tests.yml | 12 ++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/cache_doxygen.yml b/.github/workflows/cache_doxygen.yml index 914c86e5..46e6cf1b 100644 --- a/.github/workflows/cache_doxygen.yml +++ b/.github/workflows/cache_doxygen.yml @@ -1,19 +1,21 @@ name: download and cache Doxygen on: workflow_call -env: - DOXYGEN_VERSION: 1.9.4 jobs: install: runs-on: ubuntu-latest concurrency: group: linux-doxygen-cache + strategy: + fail-fast: false + matrix: + doxygen-version: ['1.9.4', '1.9.7'] steps: - uses: actions/cache/restore@v3 id: cache-doxygen with: path: doxygen-bin-arc - key: ${{ runner.os }}-doxygen-${{ env.DOXYGEN_VERSION }} + key: ${{ runner.os }}-doxygen-${{ matrix.doxygen-version }} lookup-only: true restore-keys: | ${{ runner.os }}-doxygen- @@ -22,10 +24,7 @@ jobs: if: steps.cache-doxygen.outputs.cache-hit != 'true' run: | mkdir doxygen-bin-arc && cd doxygen-bin-arc - curl -L https://sourceforge.net/projects/doxygen/files/rel-$DOXYGEN_VERSION/doxygen-$DOXYGEN_VERSION.linux.bin.tar.gz > doxygen.tar.gz - gunzip doxygen.tar.gz - tar xf doxygen.tar - mv doxygen-$DOXYGEN_VERSION doxygen + curl -L https://sourceforge.net/projects/doxygen/files/rel-${{ matrix.doxygen-version }}/doxygen-${{ matrix.doxygen-version }}.linux.bin.tar.gz > doxygen-${{ matrix.doxygen-version }}.tar.gz - uses: actions/cache/save@v3 if: steps.cache-doxygen.outputs.cache-hit != 'true' diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index aa1698c5..4157f0e4 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -33,12 +33,13 @@ jobs: path: doxygen-bin-arc fail-on-cache-miss: true key: ${{ runner.os }}-doxygen-${{ env.DOXYGEN_VERSION }} - restore-keys: | - ${{ runner.os }}-doxygen- - name: install Doxygen run: | - cd doxygen-bin-arc/doxygen + cd doxygen-bin-arc + gunzip doxygen.tar.gz + tar xf doxygen.tar + cd doxygen-${{ env.DOXYGEN_VERSION }} sudo make install - name: build the documentation diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 8a6a55cb..a3606b1c 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -1,7 +1,5 @@ name: unit tests on: [push, pull_request] -env: - DOXYGEN_VERSION: 1.9.4 jobs: cache-doxygen: uses: ./.github/workflows/cache_doxygen.yml @@ -12,6 +10,7 @@ jobs: strategy: fail-fast: false matrix: + doxygen-version: ['1.9.4', '1.9.7'] python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] sphinx-version: - '4.2.0' @@ -68,13 +67,14 @@ jobs: with: path: doxygen-bin-arc fail-on-cache-miss: true - key: ${{ runner.os }}-doxygen-${{ env.DOXYGEN_VERSION }} - restore-keys: | - ${{ runner.os }}-doxygen- + key: ${{ runner.os }}-doxygen-${{ matrix.doxygen-version }} - name: install Doxygen run: | - cd doxygen-bin-arc/doxygen + cd 
doxygen-bin-arc + gunzip doxygen.tar.gz + tar xf doxygen.tar + cd doxygen-${{ matrix.doxygen-version }} sudo make install - name: install dependencies and build extension module From 01c61cdbe0d76596762047566f8487ed533e2106 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sat, 10 Feb 2024 18:45:06 -0500 Subject: [PATCH 63/65] Fixed minor lint error --- breathe/finder/compound.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/breathe/finder/compound.py b/breathe/finder/compound.py index 32254ba8..0faa9368 100644 --- a/breathe/finder/compound.py +++ b/breathe/finder/compound.py @@ -44,7 +44,7 @@ def filter_(self, ancestors, filter_: DoxFilter, matches: list[FinderMatch]) -> for memberdef in self.node.value.memberdef: self.run_filter(filter_, matches, node_stack, memberdef) - + for member in self.node.value.member: self.run_filter(filter_, matches, node_stack, member) From 65f2ee14294b896a8b6dece7789ac7a15157c90b Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sat, 10 Feb 2024 18:48:45 -0500 Subject: [PATCH 64/65] Fixed mistake in actions --- .github/workflows/cache_doxygen.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cache_doxygen.yml b/.github/workflows/cache_doxygen.yml index 46e6cf1b..cefc0685 100644 --- a/.github/workflows/cache_doxygen.yml +++ b/.github/workflows/cache_doxygen.yml @@ -24,7 +24,7 @@ jobs: if: steps.cache-doxygen.outputs.cache-hit != 'true' run: | mkdir doxygen-bin-arc && cd doxygen-bin-arc - curl -L https://sourceforge.net/projects/doxygen/files/rel-${{ matrix.doxygen-version }}/doxygen-${{ matrix.doxygen-version }}.linux.bin.tar.gz > doxygen-${{ matrix.doxygen-version }}.tar.gz + curl -L https://sourceforge.net/projects/doxygen/files/rel-${{ matrix.doxygen-version }}/doxygen-${{ matrix.doxygen-version }}.linux.bin.tar.gz > doxygen.tar.gz - uses: actions/cache/save@v3 if: steps.cache-doxygen.outputs.cache-hit != 'true' From 26cce59f9ae5faf539871dca278f486368a7fae1 Mon Sep 17 00:00:00 2001 From: Rouslan Korneychuk Date: Sat, 10 Feb 2024 18:56:24 -0500 Subject: [PATCH 65/65] Another Github action fix --- .github/workflows/cache_doxygen.yml | 6 +++--- .github/workflows/documentation.yml | 2 +- .github/workflows/unit_tests.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cache_doxygen.yml b/.github/workflows/cache_doxygen.yml index cefc0685..26274ac9 100644 --- a/.github/workflows/cache_doxygen.yml +++ b/.github/workflows/cache_doxygen.yml @@ -5,13 +5,13 @@ jobs: install: runs-on: ubuntu-latest concurrency: - group: linux-doxygen-cache + group: linux-doxygen-${{ matrix.doxygen-version }}-cache strategy: fail-fast: false matrix: doxygen-version: ['1.9.4', '1.9.7'] steps: - - uses: actions/cache/restore@v3 + - uses: actions/cache/restore@v4 id: cache-doxygen with: path: doxygen-bin-arc @@ -26,7 +26,7 @@ jobs: mkdir doxygen-bin-arc && cd doxygen-bin-arc curl -L https://sourceforge.net/projects/doxygen/files/rel-${{ matrix.doxygen-version }}/doxygen-${{ matrix.doxygen-version }}.linux.bin.tar.gz > doxygen.tar.gz - - uses: actions/cache/save@v3 + - uses: actions/cache/save@v4 if: steps.cache-doxygen.outputs.cache-hit != 'true' with: path: doxygen-bin-arc diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 4157f0e4..1c327b4e 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -27,7 +27,7 @@ jobs: pip install -r requirements/development.txt pip install --editable . 
- - uses: actions/cache/restore@v3 + - uses: actions/cache/restore@v4 id: cache-doxygen with: path: doxygen-bin-arc diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index a3606b1c..dab04bb4 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -62,7 +62,7 @@ jobs: pip install -Iv Sphinx==${{ matrix.sphinx-version }} fi - - uses: actions/cache/restore@v3 + - uses: actions/cache/restore@v4 id: cache-doxygen with: path: doxygen-bin-arc