From 62302377db1990485f7821f1ccef4ff6ef4d07d9 Mon Sep 17 00:00:00 2001 From: volaya Date: Fri, 8 Jan 2016 08:27:22 +0100 Subject: [PATCH] [processing] UI/UX improvements Related to QEP #19 Some algorithm description provided by Frank Sokolic --- python/ext-libs/CMakeLists.txt | 2 +- python/ext-libs/yaml/__init__.py | 315 ++++ python/ext-libs/yaml/composer.py | 139 ++ python/ext-libs/yaml/constructor.py | 675 ++++++++ python/ext-libs/yaml/cyaml.py | 85 + python/ext-libs/yaml/dumper.py | 62 + python/ext-libs/yaml/emitter.py | 1140 +++++++++++++ python/ext-libs/yaml/error.py | 75 + python/ext-libs/yaml/events.py | 86 + python/ext-libs/yaml/loader.py | 40 + python/ext-libs/yaml/nodes.py | 49 + python/ext-libs/yaml/parser.py | 589 +++++++ python/ext-libs/yaml/reader.py | 190 +++ python/ext-libs/yaml/representer.py | 484 ++++++ python/ext-libs/yaml/resolver.py | 224 +++ python/ext-libs/yaml/scanner.py | 1457 +++++++++++++++++ python/ext-libs/yaml/serializer.py | 111 ++ python/ext-libs/yaml/tokens.py | 104 ++ python/plugins/processing/algs/CMakeLists.txt | 1 + .../processing/algs/gdal/GdalAlgorithm.py | 10 +- .../processing/algs/gdal/extractprojection.py | 2 +- .../plugins/processing/algs/gdal/translate.py | 16 +- .../algs/grass/GrassAlgorithmProvider.py | 3 + .../algs/grass7/Grass7AlgorithmProvider.py | 3 + .../processing/algs/help/CMakeLists.txt | 5 + .../plugins/processing/algs/help/__init__.py | 31 + python/plugins/processing/algs/help/qgis.yaml | 439 +++++ .../algs/otb/OTBAlgorithmProvider.py | 3 + .../processing/algs/saga/SagaAlgorithm212.py | 24 +- .../algs/saga/SagaAlgorithmProvider.py | 2 +- .../algs/saga/SagaGroupNameDecorator.py | 93 -- .../processing/algs/saga/SagaNameDecorator.py | 155 ++ .../processing/algs/saga/SplitRGBBands.py | 2 +- .../algs/saga/saga_version_check.txt | 185 --- .../processing/core/AlgorithmProvider.py | 13 +- .../plugins/processing/core/GeoAlgorithm.py | 48 +- python/plugins/processing/core/Processing.py | 2 - .../processing/gui/AlgorithmClassification.py | 22 - .../processing/gui/AlgorithmDialogBase.py | 58 +- .../processing/gui/BatchAlgorithmDialog.py | 2 + .../processing/gui/ProcessingToolbox.py | 202 ++- .../processing/modeler/ModelerDialog.py | 103 +- .../plugins/processing/tools/translation.py | 3 +- .../plugins/processing/ui/DlgAlgorithmBase.ui | 142 +- .../processing/ui/ProcessingToolbox.ui | 48 +- 45 files changed, 6780 insertions(+), 664 deletions(-) create mode 100755 python/ext-libs/yaml/__init__.py create mode 100755 python/ext-libs/yaml/composer.py create mode 100755 python/ext-libs/yaml/constructor.py create mode 100755 python/ext-libs/yaml/cyaml.py create mode 100755 python/ext-libs/yaml/dumper.py create mode 100755 python/ext-libs/yaml/emitter.py create mode 100755 python/ext-libs/yaml/error.py create mode 100755 python/ext-libs/yaml/events.py create mode 100755 python/ext-libs/yaml/loader.py create mode 100755 python/ext-libs/yaml/nodes.py create mode 100755 python/ext-libs/yaml/parser.py create mode 100755 python/ext-libs/yaml/reader.py create mode 100755 python/ext-libs/yaml/representer.py create mode 100755 python/ext-libs/yaml/resolver.py create mode 100755 python/ext-libs/yaml/scanner.py create mode 100755 python/ext-libs/yaml/serializer.py create mode 100755 python/ext-libs/yaml/tokens.py create mode 100644 python/plugins/processing/algs/help/CMakeLists.txt create mode 100644 python/plugins/processing/algs/help/__init__.py create mode 100644 python/plugins/processing/algs/help/qgis.yaml delete mode 100644 
python/plugins/processing/algs/saga/SagaGroupNameDecorator.py create mode 100644 python/plugins/processing/algs/saga/SagaNameDecorator.py delete mode 100644 python/plugins/processing/algs/saga/saga_version_check.txt diff --git a/python/ext-libs/CMakeLists.txt b/python/ext-libs/CMakeLists.txt index 399a68bb4a28..258f6d9f8f0e 100644 --- a/python/ext-libs/CMakeLists.txt +++ b/python/ext-libs/CMakeLists.txt @@ -33,7 +33,7 @@ MACRO(EXT_PYLIB lib) ENDIF(WITH_INTERNAL_${ulib}) ENDMACRO(EXT_PYLIB lib) -FOREACH(pkg httplib2 jinja2 markupsafe owslib pygments dateutil pytz) +FOREACH(pkg httplib2 jinja2 markupsafe owslib pygments dateutil pytz yaml) EXT_PYLIB(${pkg}) ENDFOREACH(pkg) diff --git a/python/ext-libs/yaml/__init__.py b/python/ext-libs/yaml/__init__.py new file mode 100755 index 000000000000..76e19e13f1ad --- /dev/null +++ b/python/ext-libs/yaml/__init__.py @@ -0,0 +1,315 @@ + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +__version__ = '3.11' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. 
+ If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. 
+ """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/python/ext-libs/yaml/composer.py b/python/ext-libs/yaml/composer.py new file mode 100755 index 000000000000..06e5ac782f1a --- /dev/null +++ b/python/ext-libs/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. + self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + 
return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/python/ext-libs/yaml/constructor.py b/python/ext-libs/yaml/constructor.py new file mode 100755 index 000000000000..635faac3e6fe --- /dev/null +++ b/python/ext-libs/yaml/constructor.py @@ -0,0 +1,675 @@ + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. 
+ node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + 
+ def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). 
+ + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) + (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return 
tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... 
] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. + if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/python/ext-libs/yaml/cyaml.py b/python/ext-libs/yaml/cyaml.py new file mode 100755 index 000000000000..68dcd7519288 --- /dev/null +++ b/python/ext-libs/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def 
__init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/python/ext-libs/yaml/dumper.py b/python/ext-libs/yaml/dumper.py new file mode 100755 index 000000000000..f811d2c919bf --- /dev/null +++ b/python/ext-libs/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + 
Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/python/ext-libs/yaml/emitter.py b/python/ext-libs/yaml/emitter.py new file mode 100755 index 000000000000..e5bcdcccbb1f --- /dev/null +++ b/python/ext-libs/yaml/emitter.py @@ -0,0 +1,1140 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. + self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. 
+ self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. + + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. + + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. + + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. + + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. + if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. + + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or 
(self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + 
self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/python/ext-libs/yaml/error.py b/python/ext-libs/yaml/error.py new file mode 100755 index 000000000000..577686db5fcd --- /dev/null +++ b/python/ext-libs/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/python/ext-libs/yaml/events.py b/python/ext-libs/yaml/events.py new file mode 100755 index 000000000000..f79ad389cb6c --- /dev/null +++ b/python/ext-libs/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. + +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. 
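+# The concrete classes below are what the package-level parse API yields.
+# A rough sketch of the event stream for a small document (illustrative
+# only; assumes the `yaml` package from __init__.py is importable):
+#
+#     >>> import yaml
+#     >>> for event in yaml.parse(u'a: [1, 2]\n'):
+#     ...     print type(event).__name__
+#     StreamStartEvent
+#     DocumentStartEvent
+#     MappingStartEvent
+#     ScalarEvent
+#     SequenceStartEvent
+#     ScalarEvent
+#     ScalarEvent
+#     SequenceEndEvent
+#     MappingEndEvent
+#     DocumentEndEvent
+#     StreamEndEvent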
+ +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass + +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/python/ext-libs/yaml/loader.py b/python/ext-libs/yaml/loader.py new file mode 100755 index 000000000000..293ff467b1c2 --- /dev/null +++ b/python/ext-libs/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/python/ext-libs/yaml/nodes.py b/python/ext-libs/yaml/nodes.py new file mode 100755 index 000000000000..c4f070c41e1f --- /dev/null +++ b/python/ext-libs/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = '<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... 
') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/python/ext-libs/yaml/parser.py b/python/ext-libs/yaml/parser.py new file mode 100755 index 000000000000..f9e3057f33d3 --- /dev/null +++ b/python/ext-libs/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? +# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. + self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. 
+ self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. + self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + 
elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. + event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY block_node_or_indentless_sequence?)? 
+ # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
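+    # For example (an illustrative sketch; assumes the package-level
+    # `yaml.load` from __init__.py), a `KEY ... VALUE ...` group inside a
+    # flow sequence becomes a single-pair mapping item:
+    #
+    #     >>> yaml.load(u'[foo: bar, baz]')
+    #     [{'foo': 'bar'}, 'baz']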
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
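+    # Because the VALUE part of a flow_mapping_entry is optional, keys given
+    # without an explicit value receive an empty scalar (loaded as null).
+    # Illustrative sketch, again using the package-level `yaml.load`:
+    #
+    #     >>> yaml.load(u'{a: 1, b}')
+    #     {'a': 1, 'b': None}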
+ + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/python/ext-libs/yaml/reader.py b/python/ext-libs/yaml/reader.py new file mode 100755 index 000000000000..3249e6b9f51d --- /dev/null +++ b/python/ext-libs/yaml/reader.py @@ -0,0 +1,190 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
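+# A rough usage sketch of the interface described above (illustrative only;
+# the Scanner is the real consumer of this class):
+#
+#     >>> reader = Reader(u'hello\nworld\n')
+#     >>> reader.peek()          # next character, without advancing
+#     u'h'
+#     >>> reader.prefix(5)       # next five characters
+#     u'hello'
+#     >>> reader.forward(6)      # step past 'hello' and the line break
+#     >>> reader.line, reader.column
+#     (1, 0)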
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. + + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + 
+ def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/python/ext-libs/yaml/representer.py b/python/ext-libs/yaml/representer.py new file mode 100755 index 000000000000..5f4fc70dbc0b --- /dev/null +++ b/python/ext-libs/yaml/representer.py @@ -0,0 +1,484 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + cls.yaml_representers[data_type] = representer + 
add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data in [None, ()]: + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = 
unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + +SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except 
UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % (data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + + def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. 
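+        # Illustrative example (comment added here, not part of upstream PyYAML):
+        # for a plain new-style class such as
+        #     class Point(object):
+        #         def __init__(self, x, y):
+        #             self.x, self.y = x, y
+        # __reduce_ex__(2) returns a __newobj__ call with no extra arguments and a
+        # dict state, so the code below takes the first shortcut and emits roughly
+        #     !!python/object:__main__.Point {x: 1, y: 2}
+        # rather than the more general !!python/object/new form.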
+ + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) + +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/python/ext-libs/yaml/resolver.py b/python/ext-libs/yaml/resolver.py new file mode 100755 index 000000000000..6b5ab87596ed --- /dev/null +++ b/python/ext-libs/yaml/resolver.py @@ -0,0 +1,224 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. 
+ # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. + if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not 
(isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/python/ext-libs/yaml/scanner.py b/python/ext-libs/yaml/scanner.py new file mode 100755 index 000000000000..5228fad65ce2 --- /dev/null +++ b/python/ext-libs/yaml/scanner.py @@ -0,0 +1,1457 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. 
+# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. + self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. 
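+    # Illustrative note (comment added here, not part of upstream PyYAML):
+    # the private machinery below only keeps self.tokens topped up; callers go
+    # through the public methods above, roughly
+    #     while scanner.check_token():      # may trigger fetch_more_tokens()
+    #         token = scanner.get_token()   # pops tokens[0], bumps tokens_taken
+    # and never invoke the fetch_*/scan_* methods directly.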
+ + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? + if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. 
We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not found expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # A simple key is required only if it is the first token in the current + # line. Therefore it is always allowed. + assert self.allow_simple_key or not required + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. + if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not found expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. 
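+    # Illustrative example (comment added here, not part of upstream PyYAML):
+    # for the document
+    #     a:
+    #       b: 1
+    #     c: 2
+    # the fetchers below, together with unwind_indent(), produce roughly
+    #     STREAM-START BLOCK-MAPPING-START KEY SCALAR('a') VALUE
+    #     BLOCK-MAPPING-START KEY SCALAR('b') VALUE SCALAR('1') BLOCK-END
+    #     KEY SCALAR('c') VALUE SCALAR('2') BLOCK-END STREAM-END
+    # i.e. one BLOCK-END token per indentation level that gets closed.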
+ + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. + self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. + self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. 
+ if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. 
+ self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. + # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. 
Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. + start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' 
+ self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. + # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. 
+ chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
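+        # Illustrative consequence (comment added here, not part of upstream
+        # PyYAML): a quoted value may be continued at column 0, e.g.
+        #     key:
+        #       "first line
+        # second line"
+        # still parses; only a bare '---' or '...' document separator inside the
+        # quotes is rejected (see scan_flow_scalar_breaks below).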
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. 
+ prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in u' \t': + self.forward() + if self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. + chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == u'#': + break + while True: + ch = self.peek(length) + if ch in u'\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == u':' and + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in u',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == u':' + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == u'#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(u''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! + chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. 
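+        # Examples of handles this accepts (comment added here, not part of
+        # upstream PyYAML): '!', '!!' and named handles such as '!my_handle!';
+        # the underscore is the extension mentioned above.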
+ ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. + chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/python/ext-libs/yaml/serializer.py b/python/ext-libs/yaml/serializer.py new file mode 100755 index 000000000000..0bf1e96dc162 --- /dev/null +++ b/python/ext-libs/yaml/serializer.py @@ -0,0 +1,111 @@ + +__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is 
already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/python/ext-libs/yaml/tokens.py b/python/ext-libs/yaml/tokens.py new file mode 100755 index 000000000000..4d0b48a394ac --- /dev/null +++ b/python/ext-libs/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = 
end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/python/plugins/processing/algs/CMakeLists.txt b/python/plugins/processing/algs/CMakeLists.txt index f4b2d4efd4d8..3d4afc15d5e5 100644 --- a/python/plugins/processing/algs/CMakeLists.txt +++ b/python/plugins/processing/algs/CMakeLists.txt @@ -1,5 +1,6 @@ FILE(GLOB PY_FILES *.py) +ADD_SUBDIRECTORY(help) ADD_SUBDIRECTORY(gdal) ADD_SUBDIRECTORY(grass) ADD_SUBDIRECTORY(grass7) diff --git a/python/plugins/processing/algs/gdal/GdalAlgorithm.py b/python/plugins/processing/algs/gdal/GdalAlgorithm.py index a1c707629242..604d2598ee55 100644 --- a/python/plugins/processing/algs/gdal/GdalAlgorithm.py +++ b/python/plugins/processing/algs/gdal/GdalAlgorithm.py @@ -48,11 +48,11 @@ def getCustomParametersDialog(self): def processAlgorithm(self, progress): GdalUtils.runGdal(self.getConsoleCommands(), progress) - def help(self): - try: - return False, "http://www.gdal.org/%s.html" % self.commandName() - except: - return False, None + def shortHelp(self): + return self._formatHelp('''This algorithm is based on the GDAL %s module. 
+ + For more info, see the <a href=http://www.gdal.org/%s.html>module help</a> + ''' % (self.commandName(), self.commandName())) def commandName(self): alg = self.getCopy() diff --git a/python/plugins/processing/algs/gdal/extractprojection.py b/python/plugins/processing/algs/gdal/extractprojection.py index 2f8efd903516..104ecd0d6277 100644 --- a/python/plugins/processing/algs/gdal/extractprojection.py +++ b/python/plugins/processing/algs/gdal/extractprojection.py @@ -47,7 +47,7 @@ def defineCharacteristics(self): self.tr('Create also .prj file'), False)) def getConsoleCommands(self): - return "" + return ["extractprojection"] def processAlgorithm(self, progress): rasterPath = self.getParameterValue(self.INPUT) diff --git a/python/plugins/processing/algs/gdal/translate.py b/python/plugins/processing/algs/gdal/translate.py index e5bf9b337e29..3bac45e23b24 100644 --- a/python/plugins/processing/algs/gdal/translate.py +++ b/python/plugins/processing/algs/gdal/translate.py @@ -156,11 +156,17 @@ def getConsoleCommands(self): arguments.append('-expand') arguments.append(expand) regionCoords = projwin.split(',') - arguments.append('-projwin') - arguments.append(regionCoords[0]) - arguments.append(regionCoords[3]) - arguments.append(regionCoords[1]) - arguments.append(regionCoords[2]) + try: + projwin = [] + projwin.append('-projwin') + projwin.append(regionCoords[0]) + projwin.append(regionCoords[3]) + projwin.append(regionCoords[1]) + projwin.append(regionCoords[2]) + except IndexError: + projwin = [] + if projwin: + arguments.extend(projwin) if crsId: + arguments.append('-a_srs') arguments.append(unicode(crsId)) diff --git a/python/plugins/processing/algs/grass/GrassAlgorithmProvider.py b/python/plugins/processing/algs/grass/GrassAlgorithmProvider.py index fce6888bfe92..cbc8906d747b 100644 --- a/python/plugins/processing/algs/grass/GrassAlgorithmProvider.py +++ b/python/plugins/processing/algs/grass/GrassAlgorithmProvider.py @@ -104,3 +104,6 @@ def getSupportedOutputVectorLayerExtensions(self): def getSupportedOutputRasterLayerExtensions(self): return ['tif'] + + def canBeActivated(self): + return not bool(GrassUtils.checkGrassIsInstalled()) diff --git a/python/plugins/processing/algs/grass7/Grass7AlgorithmProvider.py b/python/plugins/processing/algs/grass7/Grass7AlgorithmProvider.py index 30f08191962d..2a9a8dc6f3ee 100644 --- a/python/plugins/processing/algs/grass7/Grass7AlgorithmProvider.py +++ b/python/plugins/processing/algs/grass7/Grass7AlgorithmProvider.py @@ -110,3 +110,6 @@ def getSupportedOutputVectorLayerExtensions(self): def getSupportedOutputRasterLayerExtensions(self): return ['tif'] + + def canBeActivated(self): + return not bool(Grass7Utils.checkGrass7IsInstalled()) diff --git a/python/plugins/processing/algs/help/CMakeLists.txt b/python/plugins/processing/algs/help/CMakeLists.txt new file mode 100644 index 000000000000..ab9b09667527 --- /dev/null +++ b/python/plugins/processing/algs/help/CMakeLists.txt @@ -0,0 +1,5 @@ +FILE(GLOB PY_FILES *.py) +FILE(GLOB YAML_FILES *.yaml) + +PLUGIN_INSTALL(processing algs/help ${PY_FILES}) +PLUGIN_INSTALL(processing algs/help ${YAML_FILES}) \ No newline at end of file diff --git a/python/plugins/processing/algs/help/__init__.py b/python/plugins/processing/algs/help/__init__.py new file mode 100644 index 000000000000..7a207466afa3 --- /dev/null +++ b/python/plugins/processing/algs/help/__init__.py @@ -0,0 +1,31 @@ +import os +import yaml +from qgis.core import * +from PyQt4.QtCore import QSettings, QLocale + +def loadShortHelp(): + h = {} + path = os.path.dirname(__file__) + for f in
os.listdir(path): + if f.endswith("yaml"): + filename = os.path.join(path, f) + with open(filename) as stream: + h.update(yaml.load(stream)) + version = ".".join(QGis.QGIS_VERSION.split(".")[0:2]) + overrideLocale = QSettings().value('locale/overrideFlag', False, bool) + if not overrideLocale: + locale = QLocale.system().name()[:2] + else: + locale = QSettings().value('locale/userLocale', '') + locale = locale.split("_")[0] + def replace(s): + if s is not None: + return s.replace("{qgisdocs}", "https://docs.qgis.org/%s/%s/docs" % (version, locale)) + else: + return None + h = {k:replace(v) for k,v in h.iteritems()} + return h + + +shortHelp = loadShortHelp() + diff --git a/python/plugins/processing/algs/help/qgis.yaml b/python/plugins/processing/algs/help/qgis.yaml new file mode 100644 index 000000000000..9515dabbd4ea --- /dev/null +++ b/python/plugins/processing/algs/help/qgis.yaml @@ -0,0 +1,439 @@ +qgis:addautoincrementalfield: > + This algorithm adds a new integer field to a vector layer, with a sequential value for each feature. + + This field can be used as a unique ID for features in the layer. + + The new attribute is not added to the input layer but a new layer is generated instead. + +qgis:addfieldtoattributestable: > + This algorithm adds a new attribute to a vector layer. + + The name and characteristics of the attribute are defined as parameters. + + The new attribute is not added to the input layer but a new layer is generated instead. + +qgis:adduniquevalueindexfield: > + This algorithm takes a vector layer and an attribute and adds a new numeric field. Values in this field correspond to values in the specified attribute, so features with the same value for the attribute will have the same value in the new numeric field. This creates a numeric equivalent of the specified attribute, which defines the same classes. + + The new attribute is not added to the input layer but a new layer is generated instead. + +qgis:advancedpythonfieldcalculator: > + This algorithm adds a new attribute to a vector layer, with values resulting from applying an expression to each feature. The expression is defined as a Python function. + +qgis:barplot: + + +qgis:basicstatisticsfornumericfields: > + This algorithm generates basic statistics from the analysis of a numeric field in the attribute table of a vector layer. + + Statistics are generated as an HTML file. + +qgis:basicstatisticsfortextfields: > + This algorithm generates basic statistics from the analysis of a text field in the attribute table of a vector layer. + + Statistics are generated as an HTML file. + +qgis:buildvirtualvector: > + This algorithm creates a virtual layer that contains a set of vector layers. + + The output virtual layer will not be opened in the current project. + +qgis:checkvalidity: + This algorithm performs a validity check on the geometries of a vector layer. + + The geometries are classified into three groups (valid, invalid and error), and a vector layer is generated with the features in each of these categories. + +qgis:clip: > + This algorithm clips a vector layer using the polygons of an additional polygons layer. Only the parts of the features in the input layer that fall within the polygons of the clipping layer will be added to the resulting layer. + + The attributes of the features are not modified, although properties such as area or length of the features will be modified by the clipping operation. If such properties are stored as attributes, those attributes will have to be manually updated.
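A minimal usage sketch (illustrative only, not part of the patch): the shortHelp dictionary built by loadShortHelp() above is keyed by the same command-line names used as keys in qgis.yaml, so a description can be looked up directly once the processing.algs.help package added here is importable.

    # Sketch: query the shortHelp dict produced by loadShortHelp() above.
    # Assumes the processing.algs.help package introduced by this patch is on the path.
    from processing.algs.help import shortHelp

    description = shortHelp.get('qgis:clip')  # keys match the qgis.yaml entries
    if description is not None:
        print(description)  # folded help text; any {qgisdocs} placeholder is already expanded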
+ +qgis:concavehull: > + This algorithm computes the concave hull of the features in an input layer. + +qgis:convertgeometrytype: > + This algorithm generates a new layer based on an existing one, with a different type of geometry. + + Not all conversions are possible. For instance, a line layer can be converted to a point layer, but a point layer cannot be converted to a line layer. + + See the "Polygonize" or "Lines to polygons" algorithm for alternative options. + +qgis:convexhull: > + This algorithm computes the convex hull of features in a layer. + + If a field is specified, it will divide the features into classes based on that field, and compute a separate convex hull for the features in each class. + +qgis:countpointsinpolygon: > + This algorithm takes a points layer and a polygon layer and counts the number of points from the first one in each polygon of the second one. + + A new polygons layer is generated, with the exact same content as the input polygons layer, but containing an additional field with the points count corresponding to each polygon. + +qgis:countpointsinpolygonweighted: > + This algorithm takes a points layer and a polygon layer and counts the number of points from the first one in each polygon of the second one. + + An attribute is used in the points layer to assign weights to each point. + + A new polygons layer is generated, with the exact same content as the input polygons layer, but containing an additional field with the points count corresponding to each polygon. + +qgis:countuniquepointsinpolygon: > + This algorithm takes a points layer and a polygon layer and counts the number of points from the first one in each polygon of the second one. + + Points are classified based on an attribute, and if several points with the same attribute value are within the extent of the polygon, only one of them is counted. The final count of points in a polygon is, therefore, the count of different classes that are found in it. + + A new polygons layer is generated, with the exact same content as the input polygons layer, but containing an additional field with the points count corresponding to each polygon. + +qgis:createconstantrasterlayer: > + Given an input raster layer and a value, this algorithm generates a new layer with the same extent and cellsize as the input one, and all cells with the specified value. + +qgis:creategrid: + This algorithm creates a vector layer with a grid covering a given extent. Features can be lines or polygons, and the shape used in the grid can be rectangles, diamonds or hexagons. + + The size of each element in the grid is defined using a horizontal and vertical spacing. + + The CRS of the output layer must be defined. The grid extent and the spacing values are supposed to be expressed in the coordinates and units of this CRS. + +qgis:createpointsalonglines: > + This algorithm creates a points layer, with points distributed along the lines of an input vector layer. The distance between points (measured along the line) is defined as a parameter. + + Start and end points can be defined, so the first and last point do not fall on the line's first and last nodes. Start and end points are defined as distances, measured from the first and last nodes of the lines, in the units of the projection used by the lines layer. + +qgis:delaunaytriangulation: > + This algorithm creates a polygon layer with the Delaunay triangulation corresponding to a points layer.
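The {qgisdocs} placeholder supported by loadShortHelp() deserves a short illustration. The sketch below is standalone and hypothetical: the entry text is invented, and the version and locale are hard-coded, whereas the real code derives them from QGis.QGIS_VERSION and the QGIS locale settings.

    # Standalone sketch of the {qgisdocs} substitution performed in help/__init__.py.
    # The entry and the hard-coded version/locale are only examples.
    version = "2.14"
    locale = "en"
    docs = "https://docs.qgis.org/%s/%s/docs" % (version, locale)

    h = {"qgis:exampletool": "A hypothetical description. More details: {qgisdocs}/user_manual"}
    h = {k: v.replace("{qgisdocs}", docs) for k, v in h.items()}
    print(h["qgis:exampletool"])
    # -> A hypothetical description. More details: https://docs.qgis.org/2.14/en/docs/user_manual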
+ +qgis:deletecolumn: > + This algorithm takes a vector layer and generates a new one that has the exact same content but without one of its columns. + +qgis:deleteduplicategeometries: > + This algorithm finds duplicated geometries and removes them. Attributes are not checked, so in case two features have identical geometries but different attributes, only one of them will be added to the result layer. + +qgis:deleteholes: > + This algorithm takes a polygon layer and removes holes in polygons. It creates a new vector layer in which polygons with holes have been replaced by polygons with only their external ring. Attributes are not modified. + +qgis:densifygeometries: + This algorithm takes a polygon or line layer and generates a new one in which the geometries have a larger number of vertices than the original one. + + The number of new vertices to add to each feature geometry is specified as an input parameter. + +qgis:densifygeometriesgivenaninterval: + This algorithm takes a polygon or line layer and generates a new one in which the geometries have a larger number of vertices than the original one. + + The number of new vertices depends on the length of the geometry, and is specified as a distance between them. The distance is expressed in the same units used by the layer CRS. + +qgis:difference: > + This algorithm extracts features from the Input layer that fall outside, or partially overlap, features in the Difference layer. Input layer features that partially overlap the difference layer feature(s) are split along the boundary of the difference layer feature(s) and only the portions outside the difference layer features are retained. + + Attributes are not modified. + +qgis:dissolve: > + This algorithm takes a polygon vector layer and dissolves adjacent polygons into single geometries. An attribute can be specified to dissolve only polygons belonging to the same class (having the same value for the specified attribute), or all polygons can be dissolved, considering only their geometries. + +qgis:distancematrix: > + This algorithm creates a table containing a distance matrix, with distances between all the points in a points layer. + + +qgis:distancetonearesthub: > + Given a layer with source points and another one representing destination points, this algorithm computes the distance between each source point and the closest destination one. + + The resulting layer can contain only source points with an additional field indicating the distance to the nearest point and the name of the destination point, or lines linking each source point with its nearest destination point. + +qgis:eliminatesliverpolygons: + +qgis:explodelines: > + This algorithm takes a lines layer and creates a new one in which each line is replaced by a set of lines representing the segments in the original line. Each line in the resulting layer contains only a start and an end point, with no intermediate nodes between them. + +qgis:exportaddgeometrycolumns: > + This algorithm computes geometric properties of the features in a vector layer. It generates a new vector layer with the same content as the input one, but with additional attributes in its attributes table, containing geometric measurements. + + Depending on the geometry type of the vector layer, the attributes added to the table will be different. + +qgis:extractbyattribute: > + This algorithm creates a new vector layer that only contains certain features from an input layer.
The criteria for adding features to the resulting layer are based on the values of an attribute from the input layer. + +qgis:extractbylocation: > + This algorithm creates a new vector layer that only contains certain features from an input layer. The criteria for adding features to the resulting layer are based on the spatial relationship between each feature and the features in an additional layer. + +qgis:extractnodes: > + This algorithm takes a line or polygon layer and generates a point layer with points representing the nodes in the input lines or polygons. The attributes associated to each point are the same ones associated to the line or polygon that the point belongs to. + +qgis:fieldcalculator: > + This algorithm computes a new vector layer with the same features of the input layer, but with an additional attribute. The values of this new attribute are computed from each feature using a mathematical formula, based on the properties and attributes of the feature. + +qgis:fixeddistancebuffer: > + This algorithm computes a buffer area for all the features in an input layer, using a fixed distance. + +qgis:frequencyanalysis: > + This algorithm generates a table with a frequency analysis of the values of a selected attribute from an input vector layer. + +qgis:generatepointspixelcentroidsalongline: + + +qgis:generatepointspixelcentroidsinsidepolygons: + + +qgis:hublines: + + +qgis:hypsometriccurves: > + This algorithm computes hypsometric curves for an input Digital Elevation Model. Curves are produced as table files in an output folder specified by the user. + +qgis:importintopostgis: > + This algorithm imports a vector layer into a PostGIS database, creating a new table. + + Prior to this, a connection between QGIS and the PostGIS database has to be created (for example with the DB Manager). + +qgis:intersection: > + This algorithm extracts the overlapping portions of features in the Input and Intersect layers. Features in the Intersection layer are assigned the attributes of the overlapping features from both the Input and Intersect layers. + + Attributes are not modified. + +qgis:joinattributesbylocation: > + This algorithm takes an input vector layer and creates a new vector layer that is an extended version of the input one, with additional attributes in its attribute table. + + The additional attributes and their values are taken from a second vector layer. A spatial criterion is applied to select the values from the second layer that are added to each feature from the first layer in the resulting one. + + +qgis:joinattributestable: > + This algorithm takes an input vector layer and creates a new vector layer that is an extended version of the input one, with additional attributes in its attribute table. + + The additional attributes and their values are taken from a second vector layer. An attribute is selected in each of them to define the join criteria. + +qgis:keepnbiggestparts: > + This algorithm takes a polygon layer and creates a new polygon layer in which multipart geometries have been removed, leaving only the n largest (in terms of area) parts. + +qgis:lineintersections: + This algorithm creates point features where the lines in the Intersect layer intersect the lines in the Input layer. + + An ID field is specified for each of the input layers. Each point in the resulting layer will have the IDs of both input layers, making it possible to identify them.
+ + If no Input Unique and Intersect Unique ID fields are specified then the point features are given the values of the last field (i.e. the last field/column in the attribute table) of the intersecting lines. + +qgis:linestopolygons: + This algorithm generates a polygon layer using as polygon rings the lines from an input line layer. + + The attribute table of the output layer is the same as the one of the input line layer. + +qgis:listuniquevalues: > + This algorithm generates a report with information about the categories found in a given attribute of a vector layer. + +qgis:meanandstandarddeviationplot: + + +qgis:meancoordinates: > + This algorithm computes a point layer with the center of mass of geometries in an input layer. + + An attribute can be specified as containing weights to be applied to each feature when computing the center of mass. + + If an attribute is selected in the parameters, features will be grouped according to values in this field. Instead of a single point with the center of mass of the whole layer, the output layer will contain a center of mass for the features in each category. + +qgis:mergevectorlayers: > + This algorithm combines two vector layers of the same geometry type into a single one. + + If the attribute tables are different, the attribute table of the resulting layer will contain the attributes from both input layers. + +qgis:multiparttosingleparts: > + This algorithm takes a vector layer with multipart geometries and generates a new one in which all geometries contain a single part. Features with multipart geometries are divided into as many different features as parts the geometry contains, and the same attributes are used for each of them. + +qgis:nearestneighbouranalysis: > + This algorithm performs nearest neighbour analysis for a point layer. + + Output is generated as an HTML file with the computed statistical values. + + +qgis:numberofuniquevaluesinclasses: > + This algorithm counts the different values that appear in a specified attribute for features of the same class. + + Classes are defined according to a given attribute. For all features that share the same value of this attribute, the values of a second attribute are analyzed. + + The resulting layer contains the same features as the input layer, but with an additional attribute containing the count of unique values for that class. + +qgis:orientedminimumboundingbox: > + This algorithm takes a vector layer and generates a new one with the minimum rectangle that covers all the input features. + + As an alternative, the output layer can contain not just a single rectangle, but one for each input feature, representing the minimum rectangle that covers each of them. + +qgis:pointsdisplacement: + + +qgis:pointslayerfromtable: > + This algorithm generates a points layer based on the values from an input table. + + The table must contain a field with the X coordinate of each point and another one with the Y coordinate. A CRS for the output layer has to be specified, and the coordinates in the table are assumed to be expressed in the units used by that CRS. + + The attribute table of the resulting layer will be the input table. + +qgis:pointstopath: + + +qgis:polarplot: > + This algorithm generates a polar plot based on the value of an input vector layer.
+ + Two fields must be entered as parameters: one that defines the category each feature belongs to (to group features) and another one with the variable to plot (this has to be a numeric one). + +qgis:polygoncentroids: > + This algorithm creates a new point layer, with points representing the centroid of polygons of an input layer. + + The attributes associated to each point in the output layer are the same ones associated to the original polygon. + + +qgis:polygonfromlayerextent: > + This algorithm takes a vector layer and generates a new one with the minimum bounding box (rectangle with N-S orientation) that covers all the input features. + + As an alternative, the output layer can contain not just a single bounding box, but one for each input feature, representing the bounding box of each of them. + +qgis:polygonize: > + This algorithm takes a lines layer and creates a polygon layer, with polygons generated from the lines in the input layer. + +qgis:polygonstolines: > + This algorithm takes a polygon layer and creates a line layer, with lines representing the rings of the polygons in the input layer. + +qgis:postgisexecutesql: > + This algorithm performs a SQL database query on a PostGIS database connected to QGIS. + +qgis:randomextract: > + This algorithm takes a vector layer and generates a new one that contains only a subset of the features in the input layer. + + The subset is defined randomly, using a percentage or count value to define the total number of features in the subset. + +qgis:randomextractwithinsubsets: > + This algorithm takes a vector layer and generates a new one that contains only a subset of the features in the input layer. + + The subset is defined randomly, using a percentage or count value to define the total number of features in the subset. + + The percentage/count value is not applied to the whole layer, but instead to each category. Categories are defined according to a given attribute, which is also specified as an input parameter for the algorithm. + +qgis:randompointsalongline: > + This algorithm creates a new point layer, with points placed in the lines of another layer. + + For each line in the input layer, a given number of points is added to the resulting layer. A minimum distance can be specified to avoid points being too close to each other. + +qgis:randompointsinextent: > + This algorithm creates a new point layer with a given number of random points, all of them within a given extent. A distance factor can be specified, to avoid points being too close to each other. + +qgis:randompointsinlayerbounds: > + This algorithm creates a new point layer with a given number of random points, all of them within the extent of a given layer. A distance factor can be specified, to avoid points being too close to each other. + +qgis:randompointsinsidepolygonsfixed: > + This algorithm creates a new point layer with random points inside the polygons of a given layer. The number of points in each polygon can be defined as a fixed count or as a density value, and it will be the same for all polygons. + +qgis:randompointsinsidepolygonsvariable: > + This algorithm creates a new point layer with random points inside the polygons of a given layer. The number of points in each polygon can be defined as a fixed count or as a density value. The count/density value is taken from an attribute, so it can be different for each polygon in the input layer. + +qgis:randomselection: > + This algorithm takes a vector layer and selects a subset of its features.
No new layer is generated by this algorithm. + + The subset is defined randomly, using a percentage or count value to define the total number of features in the subset. + +qgis:randomselectionwithinsubsets: > + This algorithm takes a vector layer and selects a subset of its features. No new layer is generated by this algorithm. + + The subset is defined randomly, using a percentage or count value to define the total number of features in the subset. + + The percentage/count value is not applied to the whole layer, but instead to each category. Categories are defined according to a given attribute, which is also specified as an input parameter for the algorithm. + +qgis:rasterlayerhistogram: > + This algorithm generates a histogram with the values of a raster layer. + + The raster layer must have a single band. + +qgis:rasterlayerstatistics: > + This algorithm computes basic statistics from the values in a raster layer. + + The raster layer must have a single band. + +qgis:refactorfields: > + This algorithm allows editing the structure of the attribute table of a vector layer. Fields can be modified in their type and name, using a fields mapping. + + The original layer is not modified. A new layer is generated, which contains a modified attribute table, according to the provided fields mapping. + +qgis:regularpoints: + + +qgis:reprojectlayer: > + This algorithm reprojects a vector layer. It creates a new layer with the same features as the input one, but with geometries reprojected to a new CRS. + + Attributes are not modified by this algorithm. + +qgis:saveselectedfeatures: > + This algorithm creates a new layer with all the selected features in a given vector layer. + + If the selected layer has no selected features, all features will be added to the resulting layer. + +qgis:selectbyattribute: > + This algorithm creates a selection in a vector layer. The criteria for selecting features are based on the values of an attribute from the input layer. + +qgis:selectbyattributesum: + + +qgis:selectbyexpression: > + This algorithm creates a selection in a vector layer. The criteria for selecting features are based on a QGIS expression. + + For more information about expressions, see the user manual. + + +qgis:selectbylocation: > + This algorithm creates a selection in a vector layer. The criteria for selecting features are based on the spatial relationship between each feature and the features in an additional layer. + + +qgis:setstyleforrasterlayer: > + This algorithm sets the style of a raster layer. The style must be defined in a QML file. + +qgis:setstyleforvectorlayer: > + This algorithm sets the style of a vector layer. The style must be defined in a QML file. + +qgis:simplifygeometries: > + This algorithm simplifies the geometries in a line or polygon layer. It creates a new layer with the same features as the ones in the input layer, but with geometries containing a lower number of vertices. + +qgis:singlepartstomultipart: + + +qgis:snappointstogrid: > + This algorithm modifies the position of points in a vector layer, so that they fall on the coordinates of a grid. + + +qgis:splitlineswithlines: > + This algorithm splits the lines in a line layer using the lines in another line layer to define the breaking points. Intersections between geometries in both layers are considered as split points. + +qgis:splitvectorlayer: > + This algorithm takes a vector layer and an attribute and generates a set of vector layers in an output folder.
Each of the layers created in that folder contains all features from the input layer with the same value for the specified attribute. + + The number of files generated is equal to the number of different values found for the specified attribute. + +qgis:statisticsbycategories: + + +qgis:sumlinelengths: > + This algorithm takes a polygon layer and a lines layer and measures the total length of lines and the total number of them that cross each polygon. + + The resulting layer has the same features as the input polygon layer, but with two additional attributes containing the length and count of the lines across each polygon. The names of these two fields can be configured in the algorithm parameters. + +qgis:symmetricaldifference: > + This algorithm creates a layer containing features from both the Input and Difference layers but with the overlapping areas between the two layers removed. The attribute table of the Symmetrical Difference layer contains attributes from both the Input and Difference layers. + + +qgis:texttofloat: > + This algorithm modifies the type of a given attribute in a vector layer, converting a text attribute containing numeric strings into a numeric attribute. + +qgis:union: > + This algorithm creates a layer containing all the features from both input layers. In the case of polygon layers, separate features are created for overlapping and non-overlapping features. The attribute table of the union layer contains attribute values from the respective input layer for non-overlapping features, and attribute values from both input layers for overlapping features. + + +qgis:variabledistancebuffer: > + This algorithm computes a buffer area for all the features in an input layer. The size of the buffer for a given feature is defined by an attribute, so it allows different features to have different buffer sizes. + + +qgis:vectorgrid: + + +qgis:vectorlayerhistogram: > + This algorithm generates a histogram with the values of the attribute of a vector layer. + + The attribute to use for computing the histogram must be a numeric attribute. + +qgis:vectorlayerscatterplot: + + +qgis:voronoipolygons: > + This algorithm takes a points layer and generates a polygon layer containing the Voronoi polygons corresponding to those input points.
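Several keys in this file (for example qgis:barplot, qgis:vectorgrid, and qgis:zonalstatistics just below) are left empty and load as None. A small, hypothetical check, assuming the shortHelp dictionary from help/__init__.py, can list which algorithms still lack a description.

    # Hypothetical helper: report yaml keys that still have no short description.
    # Empty YAML values load as None, so they appear as falsy entries in shortHelp.
    from processing.algs.help import shortHelp

    for key in sorted(k for k, text in shortHelp.items() if not text):
        print(key)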
+ +qgis:zonalstatistics: + diff --git a/python/plugins/processing/algs/otb/OTBAlgorithmProvider.py b/python/plugins/processing/algs/otb/OTBAlgorithmProvider.py index 33e8358e1d3e..c29ff54ea509 100644 --- a/python/plugins/processing/algs/otb/OTBAlgorithmProvider.py +++ b/python/plugins/processing/algs/otb/OTBAlgorithmProvider.py @@ -101,3 +101,6 @@ def unload(self): AlgorithmProvider.unload(self) ProcessingConfig.removeSetting(OTBUtils.OTB_FOLDER) ProcessingConfig.removeSetting(OTBUtils.OTB_LIB_FOLDER) + + def canBeActivated(self): + return not bool(OTBUtils.checkOtbConfiguration()) diff --git a/python/plugins/processing/algs/saga/SagaAlgorithm212.py b/python/plugins/processing/algs/saga/SagaAlgorithm212.py index be0d20f18f79..78981bc3c81d 100644 --- a/python/plugins/processing/algs/saga/SagaAlgorithm212.py +++ b/python/plugins/processing/algs/saga/SagaAlgorithm212.py @@ -38,9 +38,9 @@ from processing.core.parameters import getParameterFromString, ParameterExtent, ParameterRaster, ParameterVector, ParameterTable, ParameterMultipleInput, ParameterBoolean, ParameterFixedTable, ParameterNumber, ParameterSelection from processing.core.outputs import getOutputFromString, OutputTable, OutputVector, OutputRaster import SagaUtils -from SagaGroupNameDecorator import SagaGroupNameDecorator from processing.tools import dataobjects -from processing.tools.system import getTempFilename, isWindows, getTempFilenameInTempFolder +from processing.tools.system import getTempFilename, getTempFilenameInTempFolder +from processing.algs.saga.SagaNameDecorator import * pluginPath = os.path.normpath(os.path.join( os.path.split(os.path.dirname(__file__))[0], os.pardir)) @@ -74,15 +74,21 @@ def defineCharacteristicsFromFile(self): if '|' in self.name: tokens = self.name.split('|') self.name = tokens[0] - self.i18n_name = QCoreApplication.translate("SAGAAlgorithm", unicode(self.name)) + #cmdname is the name of the algorithm in SAGA, that is, the name to use to call it in the console self.cmdname = tokens[1] + else: self.cmdname = self.name self.i18n_name = QCoreApplication.translate("SAGAAlgorithm", unicode(self.name)) - self.name = self.name[0].upper() + self.name[1:].lower() + #_commandLineName is the name used in processing to call the algorithm + #Most of the time will be equal to the cmdname, but in same cases, several processing algorithms + #call the same SAGA one + self._commandLineName = self.createCommandLineName(self.name) + self.name = decoratedAlgorithmName(self.name) + self.i18n_name = QCoreApplication.translate("SAGAAlgorithm", unicode(self.name)) line = lines.readline().strip('\n').strip() self.undecoratedGroup = line - self.group = SagaGroupNameDecorator.getDecoratedName(self.undecoratedGroup) + self.group = decoratedGroupName(self.undecoratedGroup) self.i18n_group = QCoreApplication.translate("SAGAAlgorithm", self.group) line = lines.readline().strip('\n').strip() while line != '': @@ -332,3 +338,11 @@ def checkParameterValuesBeforeExecuting(self): extent2 = (layer.extent(), layer.height(), layer.width()) if extent != extent2: return self.tr("Input layers do not have the same grid extent.") + + def createCommandLineName(self, name): + validChars = \ + 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:' + return 'saga:' + ''.join(c for c in name if c in validChars).lower() + + def commandLineName(self): + return self._commandLineName diff --git a/python/plugins/processing/algs/saga/SagaAlgorithmProvider.py b/python/plugins/processing/algs/saga/SagaAlgorithmProvider.py index 
f0b3c7ffc0c8..1288d45da2f9 100644 --- a/python/plugins/processing/algs/saga/SagaAlgorithmProvider.py +++ b/python/plugins/processing/algs/saga/SagaAlgorithmProvider.py @@ -49,7 +49,7 @@ class SagaAlgorithmProvider(AlgorithmProvider): "2.1.4": ("2.1.4", SagaAlgorithm214), "2.2.0": ("2.2.0", SagaAlgorithm214), "2.2.1": ("2.2.0", SagaAlgorithm214), - "2.2.2": ("2.2.2", SagaAlgorithm214)} + "2.2.0": ("2.2.2", SagaAlgorithm214)} def __init__(self): AlgorithmProvider.__init__(self) diff --git a/python/plugins/processing/algs/saga/SagaGroupNameDecorator.py b/python/plugins/processing/algs/saga/SagaGroupNameDecorator.py deleted file mode 100644 index fdfdcd4be25d..000000000000 --- a/python/plugins/processing/algs/saga/SagaGroupNameDecorator.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -*************************************************************************** - SagaGroupNameDecorator.py - --------------------- - Date : August 2012 - Copyright : (C) 2012 by Victor Olaya - Email : volayaf at gmail dot com -*************************************************************************** -* * -* This program is free software; you can redistribute it and/or modify * -* it under the terms of the GNU General Public License as published by * -* the Free Software Foundation; either version 2 of the License, or * -* (at your option) any later version. * -* * -*************************************************************************** -""" - -__author__ = 'Victor Olaya' -__date__ = 'August 2012' -__copyright__ = '(C) 2012, Victor Olaya' - -# This will get replaced with a git SHA1 when you do a git archive - -__revision__ = '$Format:%H$' - - -class SagaGroupNameDecorator: - - groups = {} - groups['contrib_a_perego'] = 'Contributions' - groups['statistics_grid'] = 'Geostatistics' - groups['statistics_kriging'] = 'Kriging' - groups['statistics_points'] = 'Geostatistics' - groups['statistics_regression'] = 'Geostatistics' - groups['grid_analysis'] = 'Grid - Analysis' - groups['grid_calculus'] = 'Grid - Calculus' - groups['grid_calculus_bsl'] = 'Grid - Calculus' - groups['grid_discretisation'] = 'Grid - Discretisation' - groups['grid_filter'] = 'Grid - Filter' - groups['grid_gridding'] = 'Grid - Gridding' - groups['grid_spline'] = 'Grid - Spline' - groups['grid_tools'] = 'Grid - Tools' - groups['grid_visualisation'] = 'Grid - Visualization' - groups['hacres'] = 'Hacres' - groups['imagery_segmentation'] = 'Imagery - Segmentation' - groups['imagery_classification'] = 'Imagery - Classification' - groups['imagery_rga'] = 'Imagery - RGA' - groups['imagery_tools'] = 'Imagery - Tools' - groups['io_esri_e00'] = 'I/O' - groups['io_gdal'] = 'I/O' - groups['io_gps'] = 'I/O' - groups['io_grid'] = 'I/O' - groups['io_grid_grib2'] = 'I/O' - groups['io_grid_image'] = 'I/O' - groups['io_odbc'] = 'I/O' - groups['io_shapes'] = 'I/O' - groups['io_shapes_dxf'] = 'I/O' - groups['io_shapes_las'] = 'I/O' - groups['io_table'] = 'I/O' - groups['lectures_introduction'] = 'Lectures' - groups['pj_georeference'] = 'Georeferencing' - groups['pj_geotrans'] = 'Projections and Transformations' - groups['pj_proj4'] = 'Projections and Transformations' - groups['pointcloud_tools'] = 'Point clouds' - groups['recreations_fractals'] = 'Recreations' - groups['recreations_games'] = 'Diversiones' - groups['shapes_grid'] = 'Shapes - Grid' - groups['shapes_lines'] = 'Shapes - Lines' - groups['shapes_points'] = 'Shapes - Points' - groups['shapes_polygons'] = 'Shapes - Polygons' - groups['shapes_tools'] = 'Shapes - Tools' - 
groups['shapes_transect'] = 'Shapes - Transect' - groups['sim_cellular_automata'] = 'Simulation - CA' - groups['sim_ecosystems_hugget'] = 'Simulation - Ecosystems' - groups['sim_fire_spreading'] = 'Simulation - Fire Spreading' - groups['sim_hydrology'] = 'Simulation - Hydrology' - groups['table_calculus'] = 'Table - Calculus' - groups['table_tools'] = 'Table - Tools' - groups['ta_channels'] = 'Terrain Analysis - Channels' - groups['ta_compound'] = 'Terrain Analysis - Morphometry' - groups['ta_hydrology'] = 'Terrain Analysis - Hydrology' - groups['ta_lighting'] = 'Terrain Analysis - Lighting' - groups['ta_morphometry'] = 'Terrain Analysis - Morphometry' - groups['ta_preprocessor'] = 'Terrain Analysis - Hydrology' - groups['ta_profiles'] = 'Terrain Analysis - Profiles' - groups['tin_tools'] = 'TIN' - groups['vigra'] = 'Vigra' - - @staticmethod - def getDecoratedName(groupName): - return SagaGroupNameDecorator.groups.get(groupName, groupName) diff --git a/python/plugins/processing/algs/saga/SagaNameDecorator.py b/python/plugins/processing/algs/saga/SagaNameDecorator.py new file mode 100644 index 000000000000..371486d5f660 --- /dev/null +++ b/python/plugins/processing/algs/saga/SagaNameDecorator.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- + +""" +*************************************************************************** + SagaGroupNameDecorator.py + --------------------- + Date : August 2012 + Copyright : (C) 2012 by Victor Olaya + Email : volayaf at gmail dot com +*************************************************************************** +* * +* This program is free software; you can redistribute it and/or modify * +* it under the terms of the GNU General Public License as published by * +* the Free Software Foundation; either version 2 of the License, or * +* (at your option) any later version. 
* +* * +*************************************************************************** +""" + +__author__ = 'Victor Olaya' +__date__ = 'August 2012' +__copyright__ = '(C) 2012, Victor Olaya' + +# This will get replaced with a git SHA1 when you do a git archive + +__revision__ = '$Format:%H$' + +groups = {'grid_analysis': 'Raster analysis', + 'grid_calculus': 'Raster calculus', + 'grid_calculus_bsl': 'Raster calculus', + 'grid_discretisation': 'Raster analysis', + 'grid_filter': 'Raster filter', + 'grid_gridding': 'Raster creation tools', + 'grid_spline': 'Raster creation tools', + 'grid_tools': 'Raster tools', + 'grid_visualisation': 'Raster visualization', + 'imagery_classification': 'Image analysis', + 'imagery_rga': 'Image analysis', + 'imagery_segmentation': 'Image analysis', + 'imagery_tools': 'Image analysis', + 'io_esri_e00': 'I/O', + 'io_gdal': 'I/O', + 'io_gps': 'I/O', + 'io_grid': 'I/O', + 'io_grid_grib2': 'I/O', + 'io_grid_image': 'I/O', + 'io_odbc': 'I/O', + 'io_shapes': 'I/O', + 'io_shapes_dxf': 'I/O', + 'io_shapes_las': 'I/O', + 'io_table': 'I/O', + 'pj_georeference': 'Georeferencing', + 'pj_geotrans': 'Projections and Transformations', + 'pj_proj4': 'Projections and Transformations', + 'pointcloud_tools': 'Point clouds', + 'shapes_grid': 'Vector to raster', + 'shapes_lines': 'Vector line tools', + 'shapes_points': 'Vector point tools', + 'shapes_polygons': 'Vector polygon tools', + 'shapes_tools': 'Vector general tools', + 'shapes_transect': 'Vector general tools', + 'sim_cellular_automata': 'Simulation', + 'sim_ecosystems_hugget': 'Simulation', + 'sim_fire_spreading': 'Simulation', + 'sim_hydrology': 'Simulation', + 'statistics_grid': 'Geostatistics', + 'statistics_kriging': 'Raster creation tools', + 'statistics_points': 'Geostatistics', + 'statistics_regression': 'Geostatistics', + 'ta_channels': 'Terrain Analysis - Channels', + 'ta_compound': 'Terrain Analysis - Morphometry', + 'ta_hydrology': 'Terrain Analysis - Hydrology', + 'ta_lighting': 'Terrain Analysis - Lighting', + 'ta_morphometry': 'Terrain Analysis - Morphometry', + 'ta_preprocessor': 'Terrain Analysis - Hydrology', + 'ta_profiles': 'Terrain Analysis - Profiles', + 'table_calculus': 'Table tools', + 'table_tools': 'Table tools', + 'tin_tools': 'TIN'} + + +def decoratedGroupName(name): + return groups.get(name, name) + +algorithms = {'Add Grid Values to Points': 'Add raster values to points', + 'Add Grid Values to Shapes': 'Add raster values to features', + 'Change Grid Values': 'Reclassify values (simple)', + 'Clip Grid with Polygon': 'Clip raster with polygon', + 'Cluster Analysis for Grids': 'Cluster Analysis', + 'Contour Lines from Grid': 'Contour Lines', + 'Cubic Spline Approximation': 'Interpolate (Cubic spline)', + 'Cut Shapes Layer': 'Cut vector Layer', + 'Directional Statistics for Single Grid': 'Directional Statistics for raster layer', + 'Filter Clumps': 'Remove small pixel clumps (to no-data)', + 'Fire Risk Analysis': 'Fire Risk Analysis', + 'Fit N Points to shape': 'Fit n points in polygon', + 'Flat Detection': 'Flat Detection', + 'Flow Accumulation (Flow Tracing)': 'Catchment area (Flow Tracing)', + 'Flow Accumulation (Recursive)': 'Catchment area (Recursive)', + 'Flow Accumulation (Top-Down)': 'Catchment area', + 'GWR for Multiple Predictor Grids': 'GWR for Multiple Predictor layers', + 'GWR for Single Predictor Grid': 'GWR for Single Predictor layer', + 'Geographically Weighted Multiple Regression (Points/Grids)': 'Geographically Weighted Multiple Regression (Points/Raster)', + 'Geographically 
Weighted Regression (Points/Grid)': 'Geographically Weighted Regression (Points/Raster)', + 'Geometric Figures': 'Geometric Figures', + 'Get Shapes Extents': 'Feature extents', + "Global Moran's I for Grids": "Global Moran's I for raster layer", + 'Grid Buffer': 'Raster Buffer', + 'Grid Cell Index': 'Raster Cell Index', + 'Grid Difference': 'Raster Difference', + 'Grid Division': 'Raster Division', + 'Grid Masking': 'Raster Masking', + 'Grid Normalisation': 'Raster Normalisation', + 'Grid Orientation': 'Raster Orientation', + 'Grid Proximity Buffer': 'Raster Proximity Buffer', + 'Grid Skeletonization': 'Raster Skeletonization', + 'Grid Standardisation': 'Raster Standardisation', + 'Grid Statistics for Polygons': 'Raster Statistics for Polygons', + 'Grid Values to Points': 'Raster Values to Points', + 'Grid Values to Points (randomly)': 'Raster Values to Points (randomly)', + 'Grid Volume': 'Raster Volume', + 'Grids Product': 'Raster Product', + 'Grids Sum': 'Rasters Sum', + 'Inverse Distance Weighted': 'Inverse Distance Weighted Interpolation', + 'Identity': 'Polygon identity', + 'Merge Layers': 'Merge vector layers', + 'Modified Quadratic Shepard': 'Modified Quadratic Shepard interpolation', + 'Mosaick raster layers': 'Mosaic raster layers', + 'Multilevel B-Spline Interpolation': 'Multilevel B-Spline Interpolation', + 'Multilevel B-Spline Interpolation (from Grid)': 'Multilevel B-Spline Interpolation (from Raster)', + 'Multiple Regression Analysis (Grid/Grids)': 'Multiple Regression Analysis (Raster/Raster)', + 'Multiple Regression Analysis (Points/Grids)': 'Multiple Regression Analysis (Points/Raster)', + 'Proximity Grid': 'Proximity Raster', + 'QuadTree Structure to Shapes': 'QuadTree Structure to polygons', + 'Radius of Variance (Grid)': 'Radius of Variance (Raster)', + 'Reclassify Grid Values': 'Reclassify values', + 'Shapes Buffer (Attribute distance)': 'Variable distance buffer', + 'Shapes Buffer (Fixed distance)': 'fixed distance buffer', + 'Shapes to Grid': 'Rasterize', + 'Statistics for Grids': 'Statistics for Rasters', + 'Terrain Ruggedness Index (TRI)': 'Terrain Ruggedness Index (TRI)', + 'Thin Plate Spline (Global)': 'Thin Plate Spline (Global)', + 'Thin Plate Spline (Local)': 'Thin Plate Spline (Local)', + 'Thin Plate Spline (TIN)': 'Thin Plate Spline (TIN)', + 'Threshold Buffer': 'Threshold raster buffer', + 'Transform Shapes': 'Transform vector layer', + 'Transpose Grids': 'Transpose Raster layers', + 'Union': 'Polygon uUnion', + 'Update': 'Polygon update', + 'Upslope Area': 'Upslope Area', + 'Zonal Grid Statistics': 'Zonal raster statistics'} + +def decoratedAlgorithmName(name): + decorated = algorithms.get(name, name) + return decorated[0].upper() + decorated[1:].lower() diff --git a/python/plugins/processing/algs/saga/SplitRGBBands.py b/python/plugins/processing/algs/saga/SplitRGBBands.py index 52e4476e5ebd..27290b4f5cfa 100644 --- a/python/plugins/processing/algs/saga/SplitRGBBands.py +++ b/python/plugins/processing/algs/saga/SplitRGBBands.py @@ -49,7 +49,7 @@ def getIcon(self): def defineCharacteristics(self): self.name, self.i18n_name = self.trAlgorithm('Split RGB bands') - self.group, self.i18n_group = self.trAlgorithm('Grid - Tools') + self.group, self.i18n_group = self.trAlgorithm('Image tools') self.addParameter(ParameterRaster(SplitRGBBands.INPUT, self.tr('Input layer'), False)) self.addOutput(OutputRaster(SplitRGBBands.R, diff --git a/python/plugins/processing/algs/saga/saga_version_check.txt b/python/plugins/processing/algs/saga/saga_version_check.txt deleted 
file mode 100644 index fb55bf707da4..000000000000 --- a/python/plugins/processing/algs/saga/saga_version_check.txt +++ /dev/null @@ -1,185 +0,0 @@ - --------------------------------------------------- -description\AddPolygonAttributestoPoints.txt [ERROR] -["Unknown option 'FIELD'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\shapes_points.dll\n', 'library name:\tShapes - Points\n', 'tool name :\tAdd Polygon Attributes to Points\n', 'author :\tO.Conrad (c) 2009\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd shapes_points 10 -INPUT [-OUTPUT ] -POLYGONS [-FIELDS ]\n', ' -INPUT: \tPoints\n', '\tShapes (input)\n', ' -OUTPUT: \tResult\n', '\tShapes (optional output)\n', ' -POLYGONS:\tPolygons\n', '\tShapes (input)\n', ' -FIELDS: \tAttributes\n', '\tTable fields\n', 'Error: executing tool [Add Polygon Attributes to Points]\n'] -Usage: saga_cmd shapes_points 10 -INPUT [-OUTPUT ] -POLYGONS [-FIELDS ] - -Parameters in description:['-INPUT', '-POLYGONS', '-FIELD', '-OUTPUT'] --------------------------------------------------- - --------------------------------------------------- -description\BurnStreamNetworkintoDEM.txt [ERROR] -["The value for the option 'FLOWDIR' must be specified.\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_preprocessor.dll\n', 'library name:\tTerrain Analysis - Preprocessing\n', 'tool name :\tBurn Stream Network into DEM\n', 'author :\tO.Conrad (c) 2011\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_preprocessor 6 -DEM [-BURN ] -STREAM -FLOWDIR [-METHOD ] [-EPSILON ]\n', ' -DEM: \tDEM\n', '\tGrid (input)\n', ' -BURN: \tProcessed DEM\n', '\tGrid (optional output)\n', ' -STREAM: \tStreams\n', '\tGrid (input)\n', ' -FLOWDIR:\tFlow Direction\n', '\tGrid (input)\n', ' -METHOD: \tMethod\n', '\tChoice\n', '\tAvailable Choices:\n', "\t[0] simply decrease cell's value by epsilon\n", "\t[1] lower cell's value to neighbours minimum value minus epsilon\n", '\t[2] trace stream network downstream\n', '\tDefault: 0\n', ' -EPSILON:\tEpsilon\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', 'Error: executing tool [Burn Stream Network into DEM]\n'] -Usage: saga_cmd ta_preprocessor 6 -DEM [-BURN ] -STREAM -FLOWDIR [-METHOD ] [-EPSILON ] - -Parameters in description:['-DEM', '-STREAM', '-METHOD', '-BURN'] --------------------------------------------------- - --------------------------------------------------- -description\CellBalance.txt [ERROR] -["Unknown option 'WEIGHT'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_hydrology.dll\n', 'library name:\tTerrain Analysis - Hydrology\n', 'tool name :\tCell Balance\n', 'author :\t(c) 2004 
by V.Olaya, (c) 2006 by O.Conrad\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_hydrology 10 -DEM [-WEIGHTS ] [-WEIGHTS_DEFAULT ] [-BALANCE ] [-METHOD ]\n', ' -DEM: \tElevation\n', '\tGrid (input)\n', ' -WEIGHTS: \tWeights\n', '\tGrid (optional input)\n', ' -WEIGHTS_DEFAULT:\tDefault\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -BALANCE: \tCell Balance\n', '\tGrid (output)\n', ' -METHOD: \tMethod\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] Deterministic 8\n', '\t[1] Multiple Flow Direction\n', '\tDefault: 0\n', 'Error: executing tool [Cell Balance]\n'] -Usage: saga_cmd ta_hydrology 10 -DEM [-WEIGHTS ] [-WEIGHTS_DEFAULT ] [-BALANCE ] [-METHOD ] - -Parameters in description:['-DEM', '-WEIGHTS', '-METHOD', '-BALANCE'] --------------------------------------------------- - --------------------------------------------------- -description\ConvergenceIndex(SearchRadius).txt [ERROR] -["Unknown option 'DISTANCE_WEIGHTING_WEIGHTING'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_morphometry.dll\n', 'library name:\tTerrain Analysis - Morphometry\n', 'tool name :\tConvergence Index (Search Radius)\n', 'author :\tO.Conrad (c) 2003\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_morphometry 2 -ELEVATION [-CONVERGENCE ] [-RADIUS ] [-DISTANCE_WEIGHTING_DW_WEIGHTING ] [-DISTANCE_WEIGHTING_DW_IDW_POWER ] [-DISTANCE_WEIGHTING_DW_IDW_OFFSET] [-DISTANCE_WEIGHTING_DW_BANDWIDTH ] [-SLOPE] [-DIFFERENCE ]\n', ' -ELEVATION: \tElevation\n', '\tGrid (input)\n', ' -CONVERGENCE: \tConvergence Index\n', '\tGrid (output)\n', ' -RADIUS: \tRadius [Cells]\n', '\tFloating point\n', '\tMinimum: 1.000000\n', '\tDefault: 10.000000\n', ' -DISTANCE_WEIGHTING_DW_WEIGHTING:\tWeighting Function\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] no distance weighting\n', '\t[1] inverse distance to a power\n', '\t[2] exponential\n', '\t[3] gaussian weighting\n', '\tDefault: 0\n', ' -DISTANCE_WEIGHTING_DW_IDW_POWER:\tInverse Distance Weighting Power\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -DISTANCE_WEIGHTING_DW_IDW_OFFSET \tInverse Distance Offset\n', '\tBoolean\n', '\tDefault: 1\n', ' -DISTANCE_WEIGHTING_DW_BANDWIDTH:\tGaussian and Exponential Weighting Bandwidth\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -SLOPE \tGradient\n', '\tBoolean\n', '\tDefault: 0\n', ' -DIFFERENCE: \tDifference\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] direction to the center cell\n', "\t[1] center cell's aspect direction\n", '\tDefault: 0\n', 'Error: executing tool [Convergence Index (Search Radius)]\n'] -Usage: saga_cmd ta_morphometry 2 -ELEVATION [-CONVERGENCE ] [-RADIUS ] [-DISTANCE_WEIGHTING_DW_WEIGHTING ] [-DISTANCE_WEIGHTING_DW_IDW_POWER ] [-DISTANCE_WEIGHTING_DW_IDW_OFFSET] [-DISTANCE_WEIGHTING_DW_BANDWIDTH ] [-SLOPE] [-DIFFERENCE ] - -Parameters in description:['-ELEVATION', '-DISTANCE_WEIGHTING_WEIGHTING', '-DIFFERENCE', '-CONVERGENCE'] --------------------------------------------------- - --------------------------------------------------- -description\CurvatureClassification.txt [ERROR] -["Unknown option 'CPLAN'\n", 
'_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_morphometry.dll\n', 'library name:\tTerrain Analysis - Morphometry\n', 'tool name :\tCurvature Classification\n', 'author :\tO.Conrad (c) 2001\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_morphometry 4 -DEM [-CLASS ] [-THRESHOLD ]\n', ' -DEM: \tElevation\n', '\tGrid (input)\n', ' -CLASS: \tCurvature Classification\n', '\tGrid (output)\n', ' -THRESHOLD:\tThreshold for plane\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 0.000500\n', 'Error: executing tool [Curvature Classification]\n'] -Usage: saga_cmd ta_morphometry 4 -DEM [-CLASS ] [-THRESHOLD ] - -Parameters in description:['-CPLAN', '-CPROF', '-CLASS'] --------------------------------------------------- - --------------------------------------------------- -description\GridShrinkExpand.txt [ERROR] -["Unknown option 'OPERATION'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\grid_tools.dll\n', 'library name:\tGrid - Tools\n', 'tool name :\tResampling\n', 'author :\tO.Conrad (c) 2003\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd grid_tools 0 -INPUT [-INPUT_ADD ] [-OUTPUT_ADD ] [-KEEP_TYPE] [-TARGET ] [-SCALE_UP_METHOD ] [-SCALE_DOWN_METHOD ] [-USER_XMIN ] [-USER_XMAX ] [-USER_YMIN ] [-USER_YMAX ] [-USER_SIZE ] [-USER_FIT ] [-USER_GRID ] [-GRID_GRID ]\n', ' -INPUT: \tGrid\n', '\tGrid (input)\n', ' -INPUT_ADD: \tAdditional Grids\n', '\tGrid list (optional input)\n', ' -OUTPUT_ADD: \tAdditional Grids\n', '\tGrid list (optional output)\n', ' -KEEP_TYPE \tPreserve Data Type\n', '\tBoolean\n', '\tDefault: 0\n', ' -TARGET: \tTarget Grid\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] user defined\n', '\t[1] grid\n', '\tDefault: 0\n', ' -SCALE_UP_METHOD: \tInterpolation Method\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] Nearest Neighbor\n', '\t[1] Bilinear Interpolation\n', '\t[2] Inverse Distance Interpolation\n', '\t[3] Bicubic Spline Interpolation\n', '\t[4] B-Spline Interpolation\n', '\t[5] Mean Value\n', '\t[6] Mean Value (cell area weighted)\n', '\t[7] Minimum Value\n', '\t[8] Maximum Value\n', '\t[9] Majority\n', '\tDefault: 6\n', ' -SCALE_DOWN_METHOD:\tInterpolation Method\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] Nearest Neighbor\n', '\t[1] Bilinear Interpolation\n', '\t[2] Inverse Distance Interpolation\n', '\t[3] Bicubic Spline Interpolation\n', '\t[4] B-Spline Interpolation\n', '\tDefault: 4\n', ' -USER_XMIN: \tLeft\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_XMAX: \tRight\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_YMIN: \tBottom\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_YMAX: \tTop\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_SIZE: \tCellsize\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -USER_FIT: \tFit\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] nodes\n', '\t[1] cells\n', '\tDefault: 0\n', ' -USER_GRID: 
\tGrid\n', '\tData Object (optional output)\n', ' -GRID_GRID: \tGrid\n', '\tGrid (optional input)\n', 'Error: executing tool [Resampling]\n'] -Usage: saga_cmd grid_tools 0 -INPUT [-INPUT_ADD ] [-OUTPUT_ADD ] [-KEEP_TYPE] [-TARGET ] [-SCALE_UP_METHOD ] [-SCALE_DOWN_METHOD ] [-USER_XMIN ] [-USER_XMAX ] [-USER_YMIN ] [-USER_YMAX ] [-USER_SIZE ] [-USER_FIT ] [-USER_GRID ] [-GRID_GRID ] - -Parameters in description:['-INPUT', '-OPERATION', '-MODE', '-METHOD_EXPAND', '-RESULT'] --------------------------------------------------- - --------------------------------------------------- -description\Layerofextremevalue.txt [ERROR] -["Unknown option 'GRIDS'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\grid_analysis.dll\n', 'library name:\tGrid - Analysis\n', 'tool name :\tAccumulated Cost (Isotropic)\n', 'author :\tCopyrights (c) 2004 by Victor Olaya\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd grid_analysis 0 -COST -POINTS [-ACCCOST ] [-CLOSESTPT ] [-THRESHOLD ]\n', ' -COST: \tCost Grid\n', '\tGrid (input)\n', ' -POINTS: \tDestination Points\n', '\tGrid (input)\n', ' -ACCCOST: \tAccumulated Cost\n', '\tGrid (output)\n', ' -CLOSESTPT:\tClosest Point\n', '\tGrid (output)\n', ' -THRESHOLD:\tThreshold for different route\n', '\tFloating point\n', '\tDefault: 0.000000\n', 'Error: executing tool [Accumulated Cost (Isotropic)]\n'] -Usage: saga_cmd grid_analysis 0 -COST -POINTS [-ACCCOST ] [-CLOSESTPT ] [-THRESHOLD ] - -Parameters in description:['-GRIDS', '-CRITERIA', '-RESULT'] --------------------------------------------------- - --------------------------------------------------- -description\MergeShapesLayers.txt [ERROR] -["Unknown option 'MAIN'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\shapes_tools.dll\n', 'library name:\tShapes - Tools\n', 'tool name :\tCreate New Shapes Layer\n', 'author :\tO. 
Conrad (c) 2008\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd shapes_tools 0 [-SHAPES ] [-NAME ] [-TYPE ] [-VERTEX ] [-NFIELDS ] [-FIELDS_NAME000 ] [-FIELDS_TYPE000 ] [-FIELDS_NAME001 ] [-FIELDS_TYPE001 ]\n', ' -SHAPES: \tShapes\n', '\tData Object (optional output)\n', ' -NAME: \tName\n', '\tText\n', '\tDefault: New Shapes Layer\n', ' -TYPE: \tShape Type\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] Point\n', '\t[1] Multipoint\n', '\t[2] Lines\n', '\t[3] Polygon\n', '\tDefault: 0\n', ' -VERTEX: \tVertex Type\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] x, y\n', '\t[1] x, y, z\n', '\t[2] x, y, z, m\n', '\tDefault: 0\n', ' -NFIELDS: \tNumber of Attributes\n', '\tInteger\n', '\tMinimum: 1\n', '\tDefault: 2\n', ' -FIELDS_NAME000:\tName\n', '\tText\n', '\tDefault: Name\n', ' -FIELDS_TYPE000:\tType\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] character string\n', '\t[1] 1 byte integer\n', '\t[2] 2 byte integer\n', '\t[3] 4 byte integer\n', '\t[4] 4 byte floating point\n', '\t[5] 8 byte floating point\n', '\t[6] color (rgb)\n', '\tDefault: 0\n', ' -FIELDS_NAME001:\tName\n', '\tText\n', '\tDefault: Name\n', ' -FIELDS_TYPE001:\tType\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] character string\n', '\t[1] 1 byte integer\n', '\t[2] 2 byte integer\n', '\t[3] 4 byte integer\n', '\t[4] 4 byte floating point\n', '\t[5] 8 byte floating point\n', '\t[6] color (rgb)\n', '\tDefault: 0\n', 'Error: executing tool [Create New Shapes Layer]\n'] -Usage: saga_cmd shapes_tools 0 [-SHAPES ] [-NAME ] [-TYPE ] [-VERTEX ] [-NFIELDS ] [-FIELDS_NAME000 ] [-FIELDS_TYPE000 ] [-FIELDS_NAME001 ] [-FIELDS_TYPE001 ] - -Parameters in description:['-MAIN', '-LAYERS', '-OUT'] --------------------------------------------------- - --------------------------------------------------- -description\PolynomialTrendfromGrids.txt [ERROR] -["Unknown option 'GRIDS'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\grid_calculus.dll\n', 'library name:\tGrid - Calculus\n', 'tool name :\tGrid Normalisation\n', 'author :\tO.Conrad (c) 2003\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd grid_calculus 0 -INPUT [-OUTPUT ] [-RANGE_MIN ] [-RANGE_MAX ]\n', ' -INPUT: \tGrid\n', '\tGrid (input)\n', ' -OUTPUT: \tNormalised Grid\n', '\tGrid (output)\n', ' -RANGE_MIN:\tTarget Range\n', '\tValue range\n', ' -RANGE_MAX:\tTarget Range\n', '\tValue range\n', 'Error: executing tool [Grid Normalisation]\n'] -Usage: saga_cmd grid_calculus 0 -INPUT [-OUTPUT ] [-RANGE_MIN ] [-RANGE_MAX ] - -Parameters in description:['-GRIDS', '-Y_GRIDS', '-Y_TABLE', '-POLYNOM', '-PARMS', '-QUALITY'] --------------------------------------------------- - --------------------------------------------------- -description\RandomField.txt [ERROR] -["Unknown option 'OUTPUT'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\grid_calculus.dll\n', 'library name:\tGrid - Calculus\n', 'tool name :\tRandom Field\n', 'author 
:\tO.Conrad (c) 2005\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd grid_calculus 7 [-TARGET ] [-METHOD ] [-RANGE_MIN ] [-RANGE_MAX ] [-MEAN ] [-STDDEV ] [-USER_XMIN ] [-USER_XMAX ] [-USER_YMIN ] [-USER_YMAX ] [-USER_SIZE ] [-USER_FIT ] [-USER_GRID ] [-GRID_GRID ]\n', ' -TARGET: \tTarget Grid\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] user defined\n', '\t[1] grid\n', '\tDefault: 0\n', ' -METHOD: \tMethod\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] Uniform\n', '\t[1] Gaussian\n', '\tDefault: 1\n', ' -RANGE_MIN:\tRange\n', '\tValue range\n', ' -RANGE_MAX:\tRange\n', '\tValue range\n', ' -MEAN: \tArithmetic Mean\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -STDDEV: \tStandard Deviation\n', '\tFloating point\n', '\tDefault: 1.000000\n', ' -USER_XMIN:\tLeft\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_XMAX:\tRight\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_YMIN:\tBottom\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_YMAX:\tTop\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_SIZE:\tCellsize\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -USER_FIT: \tFit\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] nodes\n', '\t[1] cells\n', '\tDefault: 0\n', ' -USER_GRID:\tGrid\n', '\tData Object (optional output)\n', ' -GRID_GRID:\tGrid\n', '\tGrid (optional input)\n', 'Error: executing tool [Random Field]\n'] -Usage: saga_cmd grid_calculus 7 [-TARGET ] [-METHOD ] [-RANGE_MIN ] [-RANGE_MAX ] [-MEAN ] [-STDDEV ] [-USER_XMIN ] [-USER_XMAX ] [-USER_YMIN ] [-USER_YMAX ] [-USER_SIZE ] [-USER_FIT ] [-USER_GRID ] [-GRID_GRID ] - -Parameters in description:['-METHOD', '-OUTPUT'] --------------------------------------------------- - --------------------------------------------------- -description\RealAreaCalculation.txt [ERROR] -["Unknown option 'DEM'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_morphometry.dll\n', 'library name:\tTerrain Analysis - Morphometry\n', 'tool name :\tSlope, Aspect, Curvature\n', 'author :\tO.Conrad (c) 2001\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_morphometry 0 -ELEVATION [-SLOPE ] [-ASPECT ] [-C_GENE ] [-C_PROF ] [-C_PLAN ] [-C_TANG ] [-C_LONG ] [-C_CROS ] [-C_MINI ] [-C_MAXI ] [-C_TOTA ] [-C_ROTO ] [-METHOD ] [-UNIT_SLOPE ] [-UNIT_ASPECT ]\n', ' -ELEVATION: \tElevation\n', '\tGrid (input)\n', ' -SLOPE: \tSlope\n', '\tGrid (output)\n', ' -ASPECT: \tAspect\n', '\tGrid (output)\n', ' -C_GENE: \tGeneral Curvature\n', '\tGrid (optional output)\n', ' -C_PROF: \tProfile Curvature\n', '\tGrid (optional output)\n', ' -C_PLAN: \tPlan Curvature\n', '\tGrid (optional output)\n', ' -C_TANG: \tTangential Curvature\n', '\tGrid (optional output)\n', ' -C_LONG: \tLongitudinal Curvature\n', '\tGrid (optional output)\n', ' -C_CROS: \tCross-Sectional Curvature\n', '\tGrid (optional output)\n', ' -C_MINI: \tMinimal Curvature\n', '\tGrid (optional output)\n', ' -C_MAXI: \tMaximal Curvature\n', '\tGrid (optional output)\n', ' -C_TOTA: \tTotal Curvature\n', '\tGrid (optional output)\n', ' -C_ROTO: \tFlow Line Curvature\n', '\tGrid (optional output)\n', ' -METHOD: \tMethod\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] maximum 
slope (Travis et al. 1975)\n', '\t[1] maximum triangle slope (Tarboton 1997)\n', '\t[2] least squares fitted plane (Horn 1981, Costa-Cabral & Burgess 1996)\n', '\t[3] 6 parameter 2nd order polynom (Evans 1979)\n', '\t[4] 6 parameter 2nd order polynom (Heerdegen & Beran 1982)\n', '\t[5] 6 parameter 2nd order polynom (Bauer, Rohdenburg, Bork 1985)\n', '\t[6] 9 parameter 2nd order polynom (Zevenbergen & Thorne 1987)\n', '\t[7] 10 parameter 3rd order polynom (Haralick 1983)\n', '\tDefault: 6\n', ' -UNIT_SLOPE: \tSlope Units\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] radians\n', '\t[1] degree\n', '\t[2] percent\n', '\tDefault: 0\n', ' -UNIT_ASPECT:\tAspect Units\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] radians\n', '\t[1] degree\n', '\tDefault: 0\n', 'Error: executing tool [Slope, Aspect, Curvature]\n'] -Usage: saga_cmd ta_morphometry 0 -ELEVATION [-SLOPE ] [-ASPECT ] [-C_GENE ] [-C_PROF ] [-C_PLAN ] [-C_TANG ] [-C_LONG ] [-C_CROS ] [-C_MINI ] [-C_MAXI ] [-C_TOTA ] [-C_ROTO ] [-METHOD ] [-UNIT_SLOPE ] [-UNIT_ASPECT ] - -Parameters in description:['-DEM', '-AREA'] --------------------------------------------------- - --------------------------------------------------- -description\SAGAWetnessIndex.txt [ERROR] -["Unknown option 'C'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_hydrology.dll\n', 'library name:\tTerrain Analysis - Hydrology\n', 'tool name :\tSAGA Wetness Index\n', 'author :\t(c) 2001 by J.Boehner, O.Conrad\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_hydrology 15 -DEM [-WEIGHT ] [-AREA ] [-SLOPE ] [-AREA_MOD ] [-TWI ] [-SUCTION ] [-AREA_TYPE ] [-SLOPE_TYPE ] [-SLOPE_MIN ] [-SLOPE_OFF ] [-SLOPE_WEIGHT ]\n', ' -DEM: \tElevation\n', '\tGrid (input)\n', ' -WEIGHT: \tWeights\n', '\tGrid (optional input)\n', ' -AREA: \tCatchment area\n', '\tGrid (output)\n', ' -SLOPE: \tCatchment slope\n', '\tGrid (output)\n', ' -AREA_MOD: \tModified Catchment Area\n', '\tGrid (output)\n', ' -TWI: \tTopographic Wetness Index\n', '\tGrid (output)\n', ' -SUCTION: \tSuction\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 10.000000\n', ' -AREA_TYPE: \tType of Area\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] absolute catchment area\n', '\t[1] square root of catchment area\n', '\t[2] specific catchment area\n', '\tDefault: 1\n', ' -SLOPE_TYPE: \tType of Slope\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] local slope\n', '\t[1] catchment slope\n', '\tDefault: 1\n', ' -SLOPE_MIN: \tMinimum Slope\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 0.000000\n', ' -SLOPE_OFF: \tOffset Slope\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 0.100000\n', ' -SLOPE_WEIGHT:\tSlope Weighting\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', 'Error: executing tool [SAGA Wetness Index]\n'] -Usage: saga_cmd ta_hydrology 15 -DEM [-WEIGHT ] [-AREA ] [-SLOPE ] [-AREA_MOD ] [-TWI ] [-SUCTION ] [-AREA_TYPE ] [-SLOPE_TYPE ] [-SLOPE_MIN ] [-SLOPE_OFF ] [-SLOPE_WEIGHT ] - -Parameters in description:['-DEM', '-C', '-GN', '-CS', '-SB'] --------------------------------------------------- - --------------------------------------------------- -description\SkyViewFactor.txt [ERROR] -["Unknown option 'MAXRADIUS'\n", 
'_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_lighting.dll\n', 'library name:\tTerrain Analysis - Lighting, Visibility\n', 'tool name :\tSky View Factor\n', 'author :\tO.Conrad (c) 2008\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_lighting 3 -DEM [-VISIBLE ] [-SVF ] [-SIMPLE ] [-TERRAIN ] [-DISTANCE ] [-RADIUS ] [-METHOD ] [-DLEVEL ] [-NDIRS ]\n', ' -DEM: \tElevation\n', '\tGrid (input)\n', ' -VISIBLE: \tVisible Sky\n', '\tGrid (output)\n', ' -SVF: \tSky View Factor\n', '\tGrid (output)\n', ' -SIMPLE: \tSky View Factor (Simplified)\n', '\tGrid (optional output)\n', ' -TERRAIN: \tTerrain View Factor\n', '\tGrid (optional output)\n', ' -DISTANCE:\tView Distance\n', '\tGrid (optional output)\n', ' -RADIUS: \tMaximum Search Radius\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 10000.000000\n', ' -METHOD: \tMethod\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] multi scale\n', '\t[1] sectors\n', '\tDefault: 1\n', ' -DLEVEL: \tMulti Scale Factor\n', '\tFloating point\n', '\tMinimum: 1.250000\n', '\tDefault: 3.000000\n', ' -NDIRS: \tNumber of Sectors\n', '\tInteger\n', '\tMinimum: 3\n', '\tDefault: 8\n', 'Error: executing tool [Sky View Factor]\n'] -Usage: saga_cmd ta_lighting 3 -DEM [-VISIBLE ] [-SVF ] [-SIMPLE ] [-TERRAIN ] [-DISTANCE ] [-RADIUS ] [-METHOD ] [-DLEVEL ] [-NDIRS ] - -Parameters in description:['-DEM', '-METHOD', '-VISIBLE', '-SVF', '-SIMPLE', '-TERRAIN'] --------------------------------------------------- - --------------------------------------------------- -description\SupervisedClassification.txt [ERROR] -["The value for the option 'STATS' must be specified.\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\imagery_classification.dll\n', 'library name:\tImagery - Classification\n', 'tool name :\tSupervised Classification\n', 'author :\tO.Conrad (c) 2005\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd imagery_classification 0 -GRIDS -ROI [-ROI_ID ] -STATS [-CLASS_INFO ] [-CLASSES ] [-QUALITY ] [-STATS_SRC ] [-METHOD ] [-NORMALISE] [-THRESHOLD_DIST ] [-THRESHOLD_PROB ] [-RELATIVE_PROB ] [-THRESHOLD_ANGLE ] [-WTA_0] [-WTA_1] [-WTA_2] [-WTA_3] [-WTA_4] [-WTA_5]\n', ' -GRIDS: \tGrids\n', '\tGrid list (input)\n', ' -ROI: \tTraining Areas\n', '\tShapes (input)\n', ' -ROI_ID: \tClass Identifier\n', '\tTable field\n', ' -STATS: \tClass Statistics\n', '\tTable (input)\n', ' -CLASS_INFO: \tSummary\n', '\tTable (output)\n', ' -CLASSES: \tClassification\n', '\tGrid (output)\n', ' -QUALITY: \tQuality\n', '\tGrid (optional output)\n', ' -STATS_SRC: \tGet Class Statistics from...\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] training areas\n', '\t[1] table\n', '\tDefault: 0\n', ' -METHOD: \tMethod\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] Binary Encoding\n', '\t[1] Parallelepiped\n', '\t[2] Minimum Distance\n', '\t[3] Mahalanobis Distance\n', '\t[4] Maximum Likelihood\n', '\t[5] Spectral Angle Mapping\n', '\t[6] 
Winner Takes All\n', '\tDefault: 2\n', ' -NORMALISE \tNormalise\n', '\tBoolean\n', '\tDefault: 0\n', ' -THRESHOLD_DIST: \tDistance Threshold\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 0.000000\n', ' -THRESHOLD_PROB: \tProbability Threshold (Percent)\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tMaximum: 100.000000\n', '\tDefault: 0.000000\n', ' -RELATIVE_PROB: \tProbability Reference\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] absolute\n', '\t[1] relative\n', '\tDefault: 1\n', ' -THRESHOLD_ANGLE:\tSpectral Angle Threshold (Degree)\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tMaximum: 90.000000\n', '\tDefault: 0.000000\n', ' -WTA_0 \tBinary Encoding\n', '\tBoolean\n', '\tDefault: 0\n', ' -WTA_1 \tParallelepiped\n', '\tBoolean\n', '\tDefault: 0\n', ' -WTA_2 \tMinimum Distance\n', '\tBoolean\n', '\tDefault: 0\n', ' -WTA_3 \tMahalanobis Distance\n', '\tBoolean\n', '\tDefault: 0\n', ' -WTA_4 \tMaximum Likelihood\n', '\tBoolean\n', '\tDefault: 0\n', ' -WTA_5 \tSpectral Angle Mapping\n', '\tBoolean\n', '\tDefault: 0\n', 'Error: executing tool [Supervised Classification]\n'] -Usage: saga_cmd imagery_classification 0 -GRIDS -ROI [-ROI_ID ] -STATS [-CLASS_INFO ] [-CLASSES ] [-QUALITY ] [-STATS_SRC ] [-METHOD ] [-NORMALISE] [-THRESHOLD_DIST ] [-THRESHOLD_PROB ] [-RELATIVE_PROB ] [-THRESHOLD_ANGLE ] [-WTA_0] [-WTA_1] [-WTA_2] [-WTA_3] [-WTA_4] [-WTA_5] - -Parameters in description:['-GRIDS', '-ROI', '-ROI_ID', '-METHOD', '-RELATIVE_PROB', '-CLASS_INFO', '-CLASSES', '-QUALITY'] --------------------------------------------------- - --------------------------------------------------- -description\TerrainRuggednessIndex(TRI).txt [ERROR] -["Unknown option 'DISTANCE_WEIGHTING_WEIGHTING'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_morphometry.dll\n', 'library name:\tTerrain Analysis - Morphometry\n', 'tool name :\tTerrain Ruggedness Index (TRI)\n', 'author :\tO.Conrad (c) 2010\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_morphometry 16 -DEM [-TRI ] [-RADIUS ] [-DISTANCE_WEIGHTING_DW_WEIGHTING ] [-DISTANCE_WEIGHTING_DW_IDW_POWER ] [-DISTANCE_WEIGHTING_DW_IDW_OFFSET] [-DISTANCE_WEIGHTING_DW_BANDWIDTH ]\n', ' -DEM: \tElevation\n', '\tGrid (input)\n', ' -TRI: \tTerrain Ruggedness Index (TRI)\n', '\tGrid (output)\n', ' -RADIUS: \tRadius (Cells)\n', '\tInteger\n', '\tMinimum: 1\n', '\tDefault: 1\n', ' -DISTANCE_WEIGHTING_DW_WEIGHTING:\tWeighting Function\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] no distance weighting\n', '\t[1] inverse distance to a power\n', '\t[2] exponential\n', '\t[3] gaussian weighting\n', '\tDefault: 0\n', ' -DISTANCE_WEIGHTING_DW_IDW_POWER:\tInverse Distance Weighting Power\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -DISTANCE_WEIGHTING_DW_IDW_OFFSET \tInverse Distance Offset\n', '\tBoolean\n', '\tDefault: 1\n', ' -DISTANCE_WEIGHTING_DW_BANDWIDTH:\tGaussian and Exponential Weighting Bandwidth\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', 'Error: executing tool [Terrain Ruggedness Index (TRI)]\n'] -Usage: saga_cmd ta_morphometry 16 -DEM [-TRI ] [-RADIUS ] [-DISTANCE_WEIGHTING_DW_WEIGHTING ] [-DISTANCE_WEIGHTING_DW_IDW_POWER ] 
[-DISTANCE_WEIGHTING_DW_IDW_OFFSET] [-DISTANCE_WEIGHTING_DW_BANDWIDTH ] - -Parameters in description:['-DEM', '-DISTANCE_WEIGHTING_WEIGHTING', '-TRI'] --------------------------------------------------- - --------------------------------------------------- -description\ThinPlateSpline(Global).txt [ERROR] -["Unknown option 'REGUL'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\grid_spline.dll\n', 'library name:\tGrid - Spline Interpolation\n', 'tool name :\tThin Plate Spline (Global)\n', 'author :\tO.Conrad (c) 2006\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd grid_spline 0 -SHAPES [-FIELD ] [-TARGET ] [-REGULARISATION ] [-USER_XMIN ] [-USER_XMAX ] [-USER_YMIN ] [-USER_YMAX ] [-USER_SIZE ] [-USER_FIT ] [-USER_GRID ] [-GRID_GRID ]\n', ' -SHAPES: \tPoints\n', '\tShapes (input)\n', ' -FIELD: \tAttribute\n', '\tTable field\n', ' -TARGET: \tTarget Grid\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] user defined\n', '\t[1] grid\n', '\tDefault: 0\n', ' -REGULARISATION:\tRegularisation\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 0.000100\n', ' -USER_XMIN: \tLeft\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_XMAX: \tRight\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_YMIN: \tBottom\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_YMAX: \tTop\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_SIZE: \tCellsize\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -USER_FIT: \tFit\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] nodes\n', '\t[1] cells\n', '\tDefault: 0\n', ' -USER_GRID: \tGrid\n', '\tData Object (optional output)\n', ' -GRID_GRID: \tGrid\n', '\tGrid (optional input)\n', 'Error: executing tool [Thin Plate Spline (Global)]\n'] -Usage: saga_cmd grid_spline 0 -SHAPES [-FIELD ] [-TARGET ] [-REGULARISATION ] [-USER_XMIN ] [-USER_XMAX ] [-USER_YMIN ] [-USER_YMAX ] [-USER_SIZE ] [-USER_FIT ] [-USER_GRID ] [-GRID_GRID ] - -Parameters in description:['-SHAPES', '-FIELD', '-TARGET', '-USER_XMIN', '-USER_XMAX', '-USER_YMIN', '-USER_YMAX', '-USER_GRID'] --------------------------------------------------- - --------------------------------------------------- -description\ThinPlateSpline(Local).txt [ERROR] -["Unknown option 'MODE'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\grid_spline.dll\n', 'library name:\tGrid - Spline Interpolation\n', 'tool name :\tThin Plate Spline (Local)\n', 'author :\tO.Conrad (c) 2006\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd grid_spline 1 -SHAPES [-FIELD ] [-TARGET ] [-REGULARISATION ] [-SEARCH_RANGE ] [-SEARCH_RADIUS ] [-SEARCH_POINTS_ALL ] [-SEARCH_POINTS_MAX ] [-SEARCH_DIRECTION ] [-USER_XMIN ] [-USER_XMAX ] [-USER_YMIN ] [-USER_YMAX ] [-USER_SIZE ] [-USER_FIT ] [-USER_GRID ] [-GRID_GRID ]\n', ' -SHAPES: \tPoints\n', '\tShapes (input)\n', ' -FIELD: \tAttribute\n', '\tTable field\n', ' -TARGET: \tTarget Grid\n', '\tChoice\n', 
'\tAvailable Choices:\n', '\t[0] user defined\n', '\t[1] grid\n', '\tDefault: 0\n', ' -REGULARISATION: \tRegularisation\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 0.000100\n', ' -SEARCH_RANGE: \tSearch Range\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] local\n', '\t[1] global\n', '\tDefault: 0\n', ' -SEARCH_RADIUS: \tMaximum Search Distance\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1000.000000\n', ' -SEARCH_POINTS_ALL:\tNumber of Points\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] maximum number of nearest points\n', '\t[1] all points within search distance\n', '\tDefault: 0\n', ' -SEARCH_POINTS_MAX:\tMaximum Number of Points\n', '\tInteger\n', '\tMinimum: 1\n', '\tDefault: 20\n', ' -SEARCH_DIRECTION: \tSearch Direction\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] all directions\n', '\t[1] quadrants\n', '\tDefault: 0\n', ' -USER_XMIN: \tLeft\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_XMAX: \tRight\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_YMIN: \tBottom\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_YMAX: \tTop\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_SIZE: \tCellsize\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -USER_FIT: \tFit\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] nodes\n', '\t[1] cells\n', '\tDefault: 0\n', ' -USER_GRID: \tGrid\n', '\tData Object (optional output)\n', ' -GRID_GRID: \tGrid\n', '\tGrid (optional input)\n', 'Error: executing tool [Thin Plate Spline (Local)]\n'] -Usage: saga_cmd grid_spline 1 -SHAPES [-FIELD ] [-TARGET ] [-REGULARISATION ] [-SEARCH_RANGE ] [-SEARCH_RADIUS ] [-SEARCH_POINTS_ALL ] [-SEARCH_POINTS_MAX ] [-SEARCH_DIRECTION ] [-USER_XMIN ] [-USER_XMAX ] [-USER_YMIN ] [-USER_YMAX ] [-USER_SIZE ] [-USER_FIT ] [-USER_GRID ] [-GRID_GRID ] - -Parameters in description:['-SHAPES', '-FIELD', '-TARGET', '-MODE', '-SELECT', '-USER_XMIN', '-USER_XMAX', '-USER_YMIN', '-USER_YMAX', '-USER_GRID'] --------------------------------------------------- - --------------------------------------------------- -description\ThinPlateSpline(TIN).txt [ERROR] -["Unknown option 'REGUL'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\grid_spline.dll\n', 'library name:\tGrid - Spline Interpolation\n', 'tool name :\tThin Plate Spline (TIN)\n', 'author :\t(c) 2006 by O.Conrad\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd grid_spline 2 -SHAPES [-FIELD ] [-TARGET ] [-REGULARISATION ] [-LEVEL ] [-FRAME] [-USER_XMIN ] [-USER_XMAX ] [-USER_YMIN ] [-USER_YMAX ] [-USER_SIZE ] [-USER_FIT ] [-USER_GRID ] [-GRID_GRID ]\n', ' -SHAPES: \tPoints\n', '\tShapes (input)\n', ' -FIELD: \tAttribute\n', '\tTable field\n', ' -TARGET: \tTarget Grid\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] user defined\n', '\t[1] grid\n', '\tDefault: 0\n', ' -REGULARISATION:\tRegularisation\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 0.000100\n', ' -LEVEL: \tNeighbourhood\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] immediate\n', '\t[1] level 1\n', '\t[2] level 2\n', '\tDefault: 1\n', ' -FRAME \tAdd Frame\n', '\tBoolean\n', '\tDefault: 1\n', ' -USER_XMIN: \tLeft\n', '\tFloating point\n', '\tDefault: 0.000000\n', 
' -USER_XMAX: \tRight\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_YMIN: \tBottom\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_YMAX: \tTop\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -USER_SIZE: \tCellsize\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -USER_FIT: \tFit\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] nodes\n', '\t[1] cells\n', '\tDefault: 0\n', ' -USER_GRID: \tGrid\n', '\tData Object (optional output)\n', ' -GRID_GRID: \tGrid\n', '\tGrid (optional input)\n', 'Error: executing tool [Thin Plate Spline (TIN)]\n'] -Usage: saga_cmd grid_spline 2 -SHAPES [-FIELD ] [-TARGET ] [-REGULARISATION ] [-LEVEL ] [-FRAME] [-USER_XMIN ] [-USER_XMAX ] [-USER_YMIN ] [-USER_YMAX ] [-USER_SIZE ] [-USER_FIT ] [-USER_GRID ] [-GRID_GRID ] - -Parameters in description:['-SHAPES', '-FIELD', '-TARGET', '-LEVEL', '-USER_XMIN', '-USER_XMAX', '-USER_YMIN', '-USER_YMAX', '-USER_GRID'] --------------------------------------------------- - --------------------------------------------------- -description\TopographicPositionIndex(TPI).txt [ERROR] -["Unknown option 'DISTANCE_WEIGHTING_WEIGHTING'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_morphometry.dll\n', 'library name:\tTerrain Analysis - Morphometry\n', 'tool name :\tTopographic Position Index (TPI)\n', 'author :\tO.Conrad (c) 2011\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_morphometry 18 -DEM [-TPI ] [-STANDARD] [-RADIUS_MIN ] [-RADIUS_MAX ] [-DISTANCE_WEIGHTING_DW_WEIGHTING ] [-DISTANCE_WEIGHTING_DW_IDW_POWER ] [-DISTANCE_WEIGHTING_DW_IDW_OFFSET] [-DISTANCE_WEIGHTING_DW_BANDWIDTH ]\n', ' -DEM: \tElevation\n', '\tGrid (input)\n', ' -TPI: \tTopographic Position Index\n', '\tGrid (output)\n', ' -STANDARD \tStandardize\n', '\tBoolean\n', '\tDefault: 0\n', ' -RADIUS_MIN: \tRadius\n', '\tValue range\n', ' -RADIUS_MAX: \tRadius\n', '\tValue range\n', ' -DISTANCE_WEIGHTING_DW_WEIGHTING:\tWeighting Function\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] no distance weighting\n', '\t[1] inverse distance to a power\n', '\t[2] exponential\n', '\t[3] gaussian weighting\n', '\tDefault: 0\n', ' -DISTANCE_WEIGHTING_DW_IDW_POWER:\tInverse Distance Weighting Power\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -DISTANCE_WEIGHTING_DW_IDW_OFFSET \tInverse Distance Offset\n', '\tBoolean\n', '\tDefault: 1\n', ' -DISTANCE_WEIGHTING_DW_BANDWIDTH:\tGaussian and Exponential Weighting Bandwidth\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', 'Error: executing tool [Topographic Position Index (TPI)]\n'] -Usage: saga_cmd ta_morphometry 18 -DEM [-TPI ] [-STANDARD] [-RADIUS_MIN ] [-RADIUS_MAX ] [-DISTANCE_WEIGHTING_DW_WEIGHTING ] [-DISTANCE_WEIGHTING_DW_IDW_POWER ] [-DISTANCE_WEIGHTING_DW_IDW_OFFSET] [-DISTANCE_WEIGHTING_DW_BANDWIDTH ] - -Parameters in description:['-DEM', '-DISTANCE_WEIGHTING_WEIGHTING', '-TPI'] --------------------------------------------------- - --------------------------------------------------- -description\TPIBasedLandformClassification.txt [ERROR] -["Unknown option 'DISTANCE_WEIGHTING_WEIGHTING'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## 
###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_morphometry.dll\n', 'library name:\tTerrain Analysis - Morphometry\n', 'tool name :\tTPI Based Landform Classification\n', 'author :\tO.Conrad (c) 2011\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_morphometry 19 -DEM [-LANDFORMS ] [-RADIUS_A_MIN ] [-RADIUS_A_MAX ] [-RADIUS_B_MIN ] [-RADIUS_B_MAX ] [-DISTANCE_WEIGHTING_DW_WEIGHTING ] [-DISTANCE_WEIGHTING_DW_IDW_POWER ] [-DISTANCE_WEIGHTING_DW_IDW_OFFSET] [-DISTANCE_WEIGHTING_DW_BANDWIDTH ]\n', ' -DEM: \tElevation\n', '\tGrid (input)\n', ' -LANDFORMS: \tLandforms\n', '\tGrid (output)\n', ' -RADIUS_A_MIN: \tRadius\n', '\tValue range\n', ' -RADIUS_A_MAX: \tRadius\n', '\tValue range\n', ' -RADIUS_B_MIN: \tRadius\n', '\tValue range\n', ' -RADIUS_B_MAX: \tRadius\n', '\tValue range\n', ' -DISTANCE_WEIGHTING_DW_WEIGHTING:\tWeighting Function\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] no distance weighting\n', '\t[1] inverse distance to a power\n', '\t[2] exponential\n', '\t[3] gaussian weighting\n', '\tDefault: 0\n', ' -DISTANCE_WEIGHTING_DW_IDW_POWER:\tInverse Distance Weighting Power\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -DISTANCE_WEIGHTING_DW_IDW_OFFSET \tInverse Distance Offset\n', '\tBoolean\n', '\tDefault: 1\n', ' -DISTANCE_WEIGHTING_DW_BANDWIDTH:\tGaussian and Exponential Weighting Bandwidth\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', 'Error: executing tool [TPI Based Landform Classification]\n'] -Usage: saga_cmd ta_morphometry 19 -DEM [-LANDFORMS ] [-RADIUS_A_MIN ] [-RADIUS_A_MAX ] [-RADIUS_B_MIN ] [-RADIUS_B_MAX ] [-DISTANCE_WEIGHTING_DW_WEIGHTING ] [-DISTANCE_WEIGHTING_DW_IDW_POWER ] [-DISTANCE_WEIGHTING_DW_IDW_OFFSET] [-DISTANCE_WEIGHTING_DW_BANDWIDTH ] - -Parameters in description:['-DEM', '-DISTANCE_WEIGHTING_WEIGHTING', '-LANDFORMS'] --------------------------------------------------- - --------------------------------------------------- -description\VectorRuggednessMeasure(VRM).txt [ERROR] -["Unknown option 'DISTANCE_WEIGHTING_WEIGHTING'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\ta_morphometry.dll\n', 'library name:\tTerrain Analysis - Morphometry\n', 'tool name :\tVector Ruggedness Measure (VRM)\n', 'author :\tO.Conrad (c) 2010\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd ta_morphometry 17 -DEM [-VRM ] [-RADIUS ] [-DISTANCE_WEIGHTING_DW_WEIGHTING ] [-DISTANCE_WEIGHTING_DW_IDW_POWER ] [-DISTANCE_WEIGHTING_DW_IDW_OFFSET] [-DISTANCE_WEIGHTING_DW_BANDWIDTH ]\n', ' -DEM: \tElevation\n', '\tGrid (input)\n', ' -VRM: \tVector Terrain Ruggedness (VRM)\n', '\tGrid (output)\n', ' -RADIUS: \tRadius (Cells)\n', '\tInteger\n', '\tMinimum: 1\n', '\tDefault: 1\n', ' -DISTANCE_WEIGHTING_DW_WEIGHTING:\tWeighting Function\n', '\tChoice\n', '\tAvailable Choices:\n', '\t[0] no distance weighting\n', '\t[1] inverse distance to a power\n', '\t[2] exponential\n', '\t[3] gaussian weighting\n', '\tDefault: 0\n', ' -DISTANCE_WEIGHTING_DW_IDW_POWER:\tInverse Distance 
Weighting Power\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', ' -DISTANCE_WEIGHTING_DW_IDW_OFFSET \tInverse Distance Offset\n', '\tBoolean\n', '\tDefault: 1\n', ' -DISTANCE_WEIGHTING_DW_BANDWIDTH:\tGaussian and Exponential Weighting Bandwidth\n', '\tFloating point\n', '\tMinimum: 0.000000\n', '\tDefault: 1.000000\n', 'Error: executing tool [Vector Ruggedness Measure (VRM)]\n'] -Usage: saga_cmd ta_morphometry 17 -DEM [-VRM ] [-RADIUS ] [-DISTANCE_WEIGHTING_DW_WEIGHTING ] [-DISTANCE_WEIGHTING_DW_IDW_POWER ] [-DISTANCE_WEIGHTING_DW_IDW_OFFSET] [-DISTANCE_WEIGHTING_DW_BANDWIDTH ] - -Parameters in description:['-DEM', '-DISTANCE_WEIGHTING_WEIGHTING', '-VRM'] --------------------------------------------------- - --------------------------------------------------- -description\VegetationIndex[distancebased].txt [ERROR] -["Unknown option 'PVI'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\imagery_tools.dll\n', 'library name:\tImagery - Tools\n', 'tool name :\tVegetation Index (Distance Based)\n', 'author :\tV.Olaya (c) 2004, O.Conrad (c) 2011\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd imagery_tools 0 -RED -NIR [-PVI0 ] [-PVI1 ] [-PVI2 ] [-PVI3 ] [-TSAVI ] [-ATSAVI ] [-INTERCEPT ] [-SLOPE ]\n', ' -RED: \tRed Reflectance\n', '\tGrid (input)\n', ' -NIR: \tNear Infrared Reflectance\n', '\tGrid (input)\n', ' -PVI0: \tPerpendicular Vegetation Index (Richardson and Wiegand, 1977)\n', '\tGrid (optional output)\n', ' -PVI1: \tPerpendicular Vegetation Index (Perry and Lautenschlager, 1984)\n', '\tGrid (optional output)\n', ' -PVI2: \tPerpendicular Vegetation Index (Walther and Shabaani)\n', '\tGrid (optional output)\n', ' -PVI3: \tPerpendicular Vegetation Index (Qi, et al., 1994)\n', '\tGrid (optional output)\n', ' -TSAVI: \tTransformed Soil Adjusted Vegetation Index (Baret et al. 
1989)\n', '\tGrid (optional output)\n', ' -ATSAVI: \tTransformed Soil Adjusted Vegetation Index (Baret and Guyot, 1991)\n', '\tGrid (optional output)\n', ' -INTERCEPT:\tIntercept of Soil Line\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -SLOPE: \tSlope of Soil Line\n', '\tFloating point\n', '\tDefault: 0.500000\n', 'Error: executing tool [Vegetation Index (Distance Based)]\n'] -Usage: saga_cmd imagery_tools 0 -RED -NIR [-PVI0 ] [-PVI1 ] [-PVI2 ] [-PVI3 ] [-TSAVI ] [-ATSAVI ] [-INTERCEPT ] [-SLOPE ] - -Parameters in description:['-NIR', '-RED', '-PVI', '-PVI1', '-PVI2', '-PVI3'] --------------------------------------------------- - --------------------------------------------------- -description\VegetationIndex[slopebased].txt [ERROR] -["Unknown option 'NDVI'\n", '_____________________________________________\n', '\n', ' ##### ## ##### ##\n', ' ### ### ## ###\n', ' ### # ## ## #### # ##\n', ' ### ##### ## # #####\n', ' ##### # ## ##### # ##\n', '_____________________________________________\n', '\n', '_____________________________________________\n', 'library path:\td:\\saga2.1.2\\modules\\imagery_tools.dll\n', 'library name:\tImagery - Tools\n', 'tool name :\tVegetation Index (Distance Based)\n', 'author :\tV.Olaya (c) 2004, O.Conrad (c) 2011\n', '_____________________________________________\n', '\n', '\n', 'Usage: saga_cmd imagery_tools 0 -RED -NIR [-PVI0 ] [-PVI1 ] [-PVI2 ] [-PVI3 ] [-TSAVI ] [-ATSAVI ] [-INTERCEPT ] [-SLOPE ]\n', ' -RED: \tRed Reflectance\n', '\tGrid (input)\n', ' -NIR: \tNear Infrared Reflectance\n', '\tGrid (input)\n', ' -PVI0: \tPerpendicular Vegetation Index (Richardson and Wiegand, 1977)\n', '\tGrid (optional output)\n', ' -PVI1: \tPerpendicular Vegetation Index (Perry and Lautenschlager, 1984)\n', '\tGrid (optional output)\n', ' -PVI2: \tPerpendicular Vegetation Index (Walther and Shabaani)\n', '\tGrid (optional output)\n', ' -PVI3: \tPerpendicular Vegetation Index (Qi, et al., 1994)\n', '\tGrid (optional output)\n', ' -TSAVI: \tTransformed Soil Adjusted Vegetation Index (Baret et al. 1989)\n', '\tGrid (optional output)\n', ' -ATSAVI: \tTransformed Soil Adjusted Vegetation Index (Baret and Guyot, 1991)\n', '\tGrid (optional output)\n', ' -INTERCEPT:\tIntercept of Soil Line\n', '\tFloating point\n', '\tDefault: 0.000000\n', ' -SLOPE: \tSlope of Soil Line\n', '\tFloating point\n', '\tDefault: 0.500000\n', 'Error: executing tool [Vegetation Index (Distance Based)]\n'] -Usage: saga_cmd imagery_tools 0 -RED -NIR [-PVI0 ] [-PVI1 ] [-PVI2 ] [-PVI3 ] [-TSAVI ] [-ATSAVI ] [-INTERCEPT ] [-SLOPE ] - -Parameters in description:['-NIR', '-RED', '-NDVI', '-RATIO', '-TVI', '-CTVI', '-TTVI', '-NRATIO'] --------------------------------------------------- - diff --git a/python/plugins/processing/core/AlgorithmProvider.py b/python/plugins/processing/core/AlgorithmProvider.py index c00e089fd967..a4ccba4b116f 100644 --- a/python/plugins/processing/core/AlgorithmProvider.py +++ b/python/plugins/processing/core/AlgorithmProvider.py @@ -51,13 +51,9 @@ def __init__(self): def loadAlgorithms(self): self.algs = [] - name = 'ACTIVATE_' + self.getName().upper().replace(' ', '_') - if not ProcessingConfig.getSetting(name): - return - else: - self._loadAlgorithms() - for alg in self.algs: - alg.provider = self + self._loadAlgorithms() + for alg in self.algs: + alg.provider = self # Methods to be overridden. 
def _loadAlgorithms(self): @@ -124,6 +120,9 @@ def getSupportedOutputTableExtensions(self): def supportsNonFileBasedOutput(self): return False + def canBeActivated(self): + return True + def tr(self, string, context=''): if context == '': context = self.__class__.__name__ diff --git a/python/plugins/processing/core/GeoAlgorithm.py b/python/plugins/processing/core/GeoAlgorithm.py index 2fe79d5a118c..4633e976089e 100644 --- a/python/plugins/processing/core/GeoAlgorithm.py +++ b/python/plugins/processing/core/GeoAlgorithm.py @@ -18,6 +18,7 @@ """ + __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' @@ -43,7 +44,7 @@ from processing.algs.gdal.GdalUtils import GdalUtils from processing.tools import dataobjects, vector from processing.tools.system import setTempOutput - +from processing.algs.help import shortHelp class GeoAlgorithm: @@ -105,44 +106,17 @@ def getIcon(self): def getDefaultIcon(): return GeoAlgorithm._icon - def help(self): - """Returns the help with the description of this algorithm. - It returns a tuple boolean, string. IF the boolean value is True, - it means that the string contains the actual description. If False, - it is an url or path to a file where the description is stored. - In both cases, the string or the content of the file have to be HTML, - ready to be set into the help display component. - - Returns None if there is no help file available. - - The default implementation looks for an HTML page in the QGIS - documentation site taking into account QGIS version. - """ - - qgsVersion = QGis.QGIS_VERSION_INT - major = qgsVersion / 10000 - minor = (qgsVersion - major * 10000) / 100 - if minor % 2 == 1: - qgsVersion = 'testing' - else: - qgsVersion = '{}.{}'.format(major, minor) - - providerName = self.provider.getName().lower() - groupName = self.group.lower() - groupName = groupName.replace('[', '').replace(']', '').replace(' - ', '_') - groupName = groupName.replace(' ', '_') - cmdLineName = self.commandLineName() - validChars = \ - 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' - safeGroupName = ''.join(c for c in groupName if c in validChars) + def _formatHelp(self, text): + return "

<h2>%s</h2>%s" % (self.name, "".join(["<p>%s</p>
" % s for s in text.split("\n")])) - safeAlgName = self.name.lower().replace(' ', '-') - validChars = \ - 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-' - safeAlgName = ''.join(c for c in safeAlgName if c in validChars) + def help(self): + return False, None - helpUrl = 'http://docs.qgis.org/{}/en/docs/user_manual/processing_algs/{}/{}.html#{}'.format(qgsVersion, providerName, safeGroupName, safeAlgName) - return False, helpUrl + def shortHelp(self): + text = shortHelp.get(self.commandLineName(), None) + if text is not None: + text = self._formatHelp(text) + return text def processAlgorithm(self, progress): """Here goes the algorithm itself. diff --git a/python/plugins/processing/core/Processing.py b/python/plugins/processing/core/Processing.py index 488743e09f7b..c24380a9ee10 100644 --- a/python/plugins/processing/core/Processing.py +++ b/python/plugins/processing/core/Processing.py @@ -142,13 +142,11 @@ def initialize(): # And initialize AlgorithmClassification.loadClassification() - AlgorithmClassification.loadDisplayNames() ProcessingLog.startLogging() ProcessingConfig.initialize() ProcessingConfig.readSettings() RenderingStyles.loadStyles() Processing.loadFromProviders() - # Inform registered listeners that all providers' algorithms have been loaded Processing.fireAlgsListHasChanged() diff --git a/python/plugins/processing/gui/AlgorithmClassification.py b/python/plugins/processing/gui/AlgorithmClassification.py index f21a96d1931b..2633012ceb39 100644 --- a/python/plugins/processing/gui/AlgorithmClassification.py +++ b/python/plugins/processing/gui/AlgorithmClassification.py @@ -28,7 +28,6 @@ import os from PyQt4.QtCore import QCoreApplication -displayNames = {} classification = {} @@ -49,30 +48,9 @@ def loadClassification(): lines.close() -def loadDisplayNames(): - global displayNames - if not os.path.isfile(displayNamesFile()): - return - lines = open(displayNamesFile()) - line = lines.readline().strip('\n') - while line != '': - tokens = line.split(',') - try: - displayNames[tokens[0]] = tokens[1] - except: - raise Exception(line) - line = lines.readline().strip('\n') - lines.close() - - def classificationFile(): return os.path.join(os.path.dirname(__file__), 'algclasssification.txt') - -def displayNamesFile(): - return os.path.join(os.path.dirname(__file__), 'algnames.txt') - - def getClassificationEn(alg): if alg.commandLineName().lower() in classification: group, subgroup = classification[alg.commandLineName()] diff --git a/python/plugins/processing/gui/AlgorithmDialogBase.py b/python/plugins/processing/gui/AlgorithmDialogBase.py index 33cf78293c26..2af12a7c869f 100644 --- a/python/plugins/processing/gui/AlgorithmDialogBase.py +++ b/python/plugins/processing/gui/AlgorithmDialogBase.py @@ -26,13 +26,14 @@ __revision__ = '$Format:%H$' import os +import webbrowser from PyQt4 import uic -from PyQt4.QtCore import QCoreApplication, QUrl, QSettings, QByteArray +from PyQt4.QtCore import QCoreApplication, QSettings, QByteArray, SIGNAL, QUrl from PyQt4.QtGui import QApplication, QDialogButtonBox from qgis.utils import iface -from qgis.core import QgsNetworkAccessManager +from qgis.core import * from processing.core.ProcessingConfig import ProcessingConfig from processing.gui import AlgorithmClassification @@ -68,41 +69,42 @@ def __init__(self, alg): self.setWindowTitle(AlgorithmClassification.getDisplayName(self.alg)) - self.txtHelp.page().setNetworkAccessManager(QgsNetworkAccessManager.instance()) + algHelp = self.alg.shortHelp() + if algHelp is None: + 
self.textShortHelp.setVisible(False) + else: + self.textShortHelp.document().setDefaultStyleSheet('''.summary { margin-left: 10px; margin-right: 10px; } + h2 { color: #555555; padding-bottom: 15px; } + a { text-decoration: none; color: #3498db; font-weight: bold; } + p { color: #666666; } + b { color: #333333; } + dl dd { margin-bottom: 5px; }''') + self.textShortHelp.setHtml(algHelp) + + self.textShortHelp.setOpenLinks(False) + def linkClicked(url): + webbrowser.open(url.toString()) + self.textShortHelp.connect(self.textShortHelp, SIGNAL("anchorClicked(const QUrl&)"), linkClicked) + + self.textHelp.page().setNetworkAccessManager(QgsNetworkAccessManager.instance()) - # load algorithm help if available isText, algHelp = self.alg.help() if algHelp is not None: algHelp = algHelp if isText else QUrl(algHelp) + try: + if isText: + self.textHelp.setHtml(algHelp) + else: + self.textHelp.settings().clearMemoryCaches() + self.textHelp.load(algHelp) + except: + self.tabWidget.removeTab(2) else: - algHelp = self.tr('

<h2>Sorry, no help is available for this ' - 'algorithm.</h2>
') - try: - if isText: - self.txtHelp.setHtml(algHelp) - else: - self.txtHelp.settings().clearMemoryCaches() - self.tabWidget.setTabText(2, self.tr("Help (loading...)")) - self.tabWidget.setTabEnabled(2, False) - self.txtHelp.loadFinished.connect(self.loadFinished) - self.tabWidget.currentChanged.connect(self.loadHelp) - self.txtHelp.load(algHelp) - self.algHelp = algHelp - except: - self.txtHelp.setHtml( - self.tr('

<h2>Could not open help file :-(</h2>
')) + self.tabWidget.removeTab(2) self.showDebug = ProcessingConfig.getSetting( ProcessingConfig.SHOW_DEBUG_IN_DIALOG) - def loadFinished(self): - self.tabWidget.setTabEnabled(2, True) - self.tabWidget.setTabText(2, self.tr("Help")) - - def loadHelp(self, i): - if i == 2: - self.txtHelp.findText(self.alg.name) - def closeEvent(self, evt): self.settings.setValue("/Processing/dialogBase", self.saveGeometry()) diff --git a/python/plugins/processing/gui/BatchAlgorithmDialog.py b/python/plugins/processing/gui/BatchAlgorithmDialog.py index 0d4a8775ff41..416d775d0b3b 100644 --- a/python/plugins/processing/gui/BatchAlgorithmDialog.py +++ b/python/plugins/processing/gui/BatchAlgorithmDialog.py @@ -67,6 +67,8 @@ def __init__(self, alg): self.mainWidget = BatchPanel(self, self.alg) self.setMainWidget() + self.textShortHelp.setVisible(False) + def setParamValue(self, param, widget, alg=None): if isinstance(param, (ParameterRaster, ParameterVector, ParameterTable, ParameterMultipleInput)): diff --git a/python/plugins/processing/gui/ProcessingToolbox.py b/python/plugins/processing/gui/ProcessingToolbox.py index 89042290816e..f0443c1c6868 100644 --- a/python/plugins/processing/gui/ProcessingToolbox.py +++ b/python/plugins/processing/gui/ProcessingToolbox.py @@ -28,11 +28,10 @@ import os from PyQt4 import uic -from PyQt4.QtCore import Qt, QSettings, QCoreApplication -from PyQt4.QtGui import QMenu, QAction, QTreeWidgetItem +from PyQt4.QtCore import Qt, QSettings, QCoreApplication, SIGNAL +from PyQt4.QtGui import QMenu, QAction, QTreeWidgetItem, QLabel, QMessageBox from qgis.utils import iface -from processing.modeler.ModelerUtils import ModelerUtils from processing.core.Processing import Processing from processing.core.ProcessingLog import ProcessingLog from processing.core.ProcessingConfig import ProcessingConfig @@ -42,6 +41,8 @@ from processing.gui.AlgorithmDialog import AlgorithmDialog from processing.gui.BatchAlgorithmDialog import BatchAlgorithmDialog from processing.gui.EditRenderingStylesDialog import EditRenderingStylesDialog +from processing.gui.ConfigDialog import ConfigDialog + pluginPath = os.path.split(os.path.dirname(__file__))[0] WIDGET, BASE = uic.loadUiType( @@ -59,37 +60,59 @@ def __init__(self): self.setupUi(self) self.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea) - self.modeComboBox.clear() - self.modeComboBox.addItems([self.tr('Simplified interface'), - self.tr('Advanced interface')]) - settings = QSettings() - if not settings.contains(self.USE_CATEGORIES): - settings.setValue(self.USE_CATEGORIES, True) - useCategories = settings.value(self.USE_CATEGORIES, type=bool) - if useCategories: - self.modeComboBox.setCurrentIndex(0) - else: - self.modeComboBox.setCurrentIndex(1) - self.modeComboBox.currentIndexChanged.connect(self.modeHasChanged) - self.searchBox.textChanged.connect(self.textChanged) self.algorithmTree.customContextMenuRequested.connect( self.showPopupMenu) self.algorithmTree.doubleClicked.connect(self.executeAlgorithm) - + self.txtDisabled.setVisible(False) + self.txtTip.setVisible(self.disabledProviders()) + self.txtDisabled.setOpenLinks(False) + self.txtTip.setOpenLinks(False) + self.txtDisabled.connect(self.txtDisabled, SIGNAL("anchorClicked(const QUrl&)"), + self.showDisabled) + def openSettings(): + dlg = ConfigDialog(self) + dlg.exec_() + self.txtTip.setVisible(self.disabledProviders()) + self.txtTip.connect(self.txtTip, SIGNAL("anchorClicked(const QUrl&)"), openSettings) if hasattr(self.searchBox, 'setPlaceholderText'): 
self.searchBox.setPlaceholderText(self.tr('Search...')) self.fillTree() + def showDisabled(self): + self.txtDisabled.setVisible(False) + for providerName in self.disabledWithMatchingAlgs: + self.disabledProviderItems[providerName].setHidden(False) + self.algorithmTree.expandAll() + + def disabledProviders(self): + for providerName in Processing.algs.keys(): + name = 'ACTIVATE_' + providerName.upper().replace(' ', '_') + if not ProcessingConfig.getSetting(name): + return True + return False + def textChanged(self): text = self.searchBox.text().strip(' ').lower() + for item in self.disabledProviderItems.values(): + item.setHidden(True) self._filterItem(self.algorithmTree.invisibleRootItem(), text) if text: self.algorithmTree.expandAll() + self.disabledWithMatchingAlgs = [] + for providerName, provider in Processing.algs.iteritems(): + name = 'ACTIVATE_' + providerName.upper().replace(' ', '_') + if not ProcessingConfig.getSetting(name): + for alg in provider.values(): + if text in alg.name: + self.disabledWithMatchingAlgs.append(providerName) + break + self.txtDisabled.setVisible(bool(self.disabledWithMatchingAlgs)) else: self.algorithmTree.collapseAll() self.algorithmTree.invisibleRootItem().child(0).setExpanded(True) + self.txtDisabled.setVisible(False) def _filterItem(self, item, text): if (item.childCount() > 0): @@ -97,31 +120,36 @@ def _filterItem(self, item, text): for i in xrange(item.childCount()): child = item.child(i) showChild = self._filterItem(child, text) - show = showChild or show + show = (showChild or show) and not item in self.disabledProviderItems.values() item.setHidden(not show) return show elif isinstance(item, (TreeAlgorithmItem, TreeActionItem)): hide = bool(text) and (text not in item.text(0).lower()) + if isinstance(item, TreeAlgorithmItem): + hide = hide and (text not in item.alg.commandLineName()) + if item.alg.shortHelp() is not None: + hide = hide and (text not in item.alg.shortHelp()) item.setHidden(hide) return not hide else: item.setHidden(True) return False - def modeHasChanged(self): - idx = self.modeComboBox.currentIndex() - settings = QSettings() - if idx == 0: - # Simplified - settings.setValue(self.USE_CATEGORIES, True) - else: - settings.setValue(self.USE_CATEGORIES, False) - + def activateProvider(self, providerName): + name = 'ACTIVATE_' + providerName.upper().replace(' ', '_') + ProcessingConfig.setSettingValue(name, True) self.fillTree() + self.textChanged() + self.showDisabled() + provider = Processing.getProviderFromName(providerName) + if not provider.canBeActivated(): + QMessageBox.warning(self, "Activate provider", + "The provider has been activated, but it might need additional configuration.") def algsListHasChanged(self): if self.updateAlgList: self.fillTree() + self.textChanged() def updateProvider(self, providerName, updateAlgsList=True): if updateAlgsList: @@ -229,13 +257,7 @@ def executeAlgorithm(self): action.execute() def fillTree(self): - settings = QSettings() - useCategories = settings.value(self.USE_CATEGORIES, type=bool) - if useCategories: - self.fillTreeUsingCategories() - else: - self.fillTreeUsingProviders() - self.algorithmTree.sortItems(0, Qt.AscendingOrder) + self.fillTreeUsingProviders() self.addRecentAlgorithms(False) def addRecentAlgorithms(self, updating): @@ -265,81 +287,23 @@ def addRecentAlgorithms(self, updating): self.algorithmTree.setWordWrap(True) - def fillTreeUsingCategories(self): - providersToExclude = ['model', 'script'] - self.algorithmTree.clear() - text = unicode(self.searchBox.text()) - groups = {} - 
for providerName in Processing.algs.keys(): - provider = Processing.algs[providerName] - name = 'ACTIVATE_' + providerName.upper().replace(' ', '_') - if not ProcessingConfig.getSetting(name): - continue - if providerName in providersToExclude or \ - len(ModelerUtils.providers[providerName].actions) != 0: - continue - algs = provider.values() - - # add algorithms - - for alg in algs: - if not alg.showInToolbox: - continue - altgroup, altsubgroup = AlgorithmClassification.getClassification(alg) - if altgroup is None: - continue - algName = AlgorithmClassification.getDisplayName(alg) - if text == '' or text.lower() in algName.lower(): - if altgroup not in groups: - groups[altgroup] = {} - group = groups[altgroup] - if altsubgroup not in group: - groups[altgroup][altsubgroup] = [] - subgroup = groups[altgroup][altsubgroup] - subgroup.append(alg) - - if len(groups) > 0: - mainItem = QTreeWidgetItem() - mainItem.setText(0, self.tr('Geoalgorithms')) - mainItem.setIcon(0, GeoAlgorithm.getDefaultIcon()) - mainItem.setToolTip(0, mainItem.text(0)) - for (groupname, group) in groups.items(): - groupItem = QTreeWidgetItem() - groupItem.setText(0, groupname) - groupItem.setIcon(0, GeoAlgorithm.getDefaultIcon()) - groupItem.setToolTip(0, groupItem.text(0)) - mainItem.addChild(groupItem) - for (subgroupname, subgroup) in group.items(): - subgroupItem = QTreeWidgetItem() - subgroupItem.setText(0, subgroupname) - subgroupItem.setIcon(0, GeoAlgorithm.getDefaultIcon()) - subgroupItem.setToolTip(0, subgroupItem.text(0)) - groupItem.addChild(subgroupItem) - for alg in subgroup: - algItem = TreeAlgorithmItem(alg) - subgroupItem.addChild(algItem) - - self.algorithmTree.addTopLevelItem(mainItem) - - for providerName in Processing.algs.keys(): - if providerName not in providersToExclude: - continue - name = 'ACTIVATE_' + providerName.upper().replace(' ', '_') - if not ProcessingConfig.getSetting(name): - continue - providerItem = TreeProviderItem(providerName) - self.algorithmTree.addTopLevelItem(providerItem) def fillTreeUsingProviders(self): self.algorithmTree.clear() + self.disabledProviderItems = {} + disabled = [] for providerName in Processing.algs.keys(): name = 'ACTIVATE_' + providerName.upper().replace(' ', '_') - if not ProcessingConfig.getSetting(name): - continue - providerItem = TreeProviderItem(providerName) - self.algorithmTree.addTopLevelItem(providerItem) - providerItem.setHidden(providerItem.childCount() == 0) - + if ProcessingConfig.getSetting(name): + providerItem = TreeProviderItem(providerName, self.algorithmTree, self) + providerItem.setHidden(providerItem.childCount() == 0) + else: + disabled.append(providerName) + self.algorithmTree.sortItems(0, Qt.AscendingOrder) + for providerName in sorted(disabled): + providerItem = TreeProviderItem(providerName, self.algorithmTree, self) + providerItem.setHidden(True) + self.disabledProviderItems[providerName] = providerItem class TreeAlgorithmItem(QTreeWidgetItem): @@ -369,8 +333,10 @@ def __init__(self, action): class TreeProviderItem(QTreeWidgetItem): - def __init__(self, providerName): - QTreeWidgetItem.__init__(self) + def __init__(self, providerName, tree, toolbox): + QTreeWidgetItem.__init__(self, tree) + self.tree = tree + self.toolbox = toolbox self.providerName = providerName self.provider = Processing.getProviderFromName(providerName) self.setIcon(0, self.provider.getIcon()) @@ -386,6 +352,9 @@ def populate(self): provider = Processing.algs[self.providerName] algs = provider.values() + name = 'ACTIVATE_' + 
self.providerName.upper().replace(' ', '_') + active = ProcessingConfig.getSetting(name) + # Add algorithms for alg in algs: if not alg.showInToolbox: @@ -395,10 +364,14 @@ def populate(self): else: groupItem = QTreeWidgetItem() name = alg.i18n_group or alg.group + if not active: + groupItem.setForeground(0, Qt.darkGray) groupItem.setText(0, name) groupItem.setToolTip(0, name) groups[alg.group] = groupItem algItem = TreeAlgorithmItem(alg) + if not active: + algItem.setForeground(0, Qt.darkGray) groupItem.addChild(algItem) count += 1 @@ -413,8 +386,21 @@ def populate(self): algItem = TreeActionItem(action) groupItem.addChild(algItem) - self.setText(0, self.provider.getDescription() - + QCoreApplication.translate("TreeProviderItem", " [{0} geoalgorithms]").format(count)) + text = self.provider.getDescription() + + if not active: + def activateProvider(): + self.toolbox.activateProvider(self.providerName) + label = QLabel(text + "    Activate") + label.setStyleSheet("QLabel {background-color: white; color: grey;}") + label.linkActivated.connect(activateProvider) + self.tree.setItemWidget(self, 0, label) + + else: + text += QCoreApplication.translate("TreeProviderItem", " [{0} geoalgorithms]").format(count) + self.setText(0, text) self.setToolTip(0, self.text(0)) for groupItem in groups.values(): self.addChild(groupItem) + + diff --git a/python/plugins/processing/modeler/ModelerDialog.py b/python/plugins/processing/modeler/ModelerDialog.py index 3720eb6547f3..1894e4851056 100644 --- a/python/plugins/processing/modeler/ModelerDialog.py +++ b/python/plugins/processing/modeler/ModelerDialog.py @@ -461,120 +461,21 @@ def getPositionForAlgorithmItem(self): return QPointF(newX, newY) def fillAlgorithmTree(self): - settings = QSettings() - useCategories = settings.value(self.USE_CATEGORIES, type=bool) - if useCategories: - self.fillAlgorithmTreeUsingCategories() - else: - self.fillAlgorithmTreeUsingProviders() - + self.fillAlgorithmTreeUsingProviders() self.algorithmTree.sortItems(0, Qt.AscendingOrder) text = unicode(self.searchBox.text()) if text != '': self.algorithmTree.expandAll() - def fillAlgorithmTreeUsingCategories(self): - providersToExclude = ['model', 'script'] + def fillAlgorithmTreeUsingProviders(self): self.algorithmTree.clear() text = unicode(self.searchBox.text()) - groups = {} allAlgs = ModelerUtils.allAlgs for providerName in allAlgs.keys(): - provider = allAlgs[providerName] - name = 'ACTIVATE_' + providerName.upper().replace(' ', '_') - if not ProcessingConfig.getSetting(name): - continue - if providerName in providersToExclude \ - or len(ModelerUtils.providers[providerName].actions) != 0: - continue - algs = provider.values() - - # Add algorithms - for alg in algs: - if not alg.showInModeler or alg.allowOnlyOpenedLayers: - continue - altgroup, altsubgroup = AlgorithmClassification.getClassification(alg) - if altgroup is None: - continue - algName = AlgorithmClassification.getDisplayName(alg) - if text == '' or text.lower() in algName.lower(): - if altgroup not in groups: - groups[altgroup] = {} - group = groups[altgroup] - if altsubgroup not in group: - groups[altgroup][altsubgroup] = [] - subgroup = groups[altgroup][altsubgroup] - subgroup.append(alg) - - if len(groups) > 0: - mainItem = QTreeWidgetItem() - mainItem.setText(0, self.tr('Geoalgorithms')) - mainItem.setIcon(0, GeoAlgorithm.getDefaultIcon()) - mainItem.setToolTip(0, mainItem.text(0)) - for (groupname, group) in groups.items(): - groupItem = QTreeWidgetItem() - groupItem.setText(0, groupname) - groupItem.setIcon(0, 
GeoAlgorithm.getDefaultIcon()) - groupItem.setToolTip(0, groupItem.text(0)) - mainItem.addChild(groupItem) - for (subgroupname, subgroup) in group.items(): - subgroupItem = QTreeWidgetItem() - subgroupItem.setText(0, subgroupname) - subgroupItem.setIcon(0, GeoAlgorithm.getDefaultIcon()) - subgroupItem.setToolTip(0, subgroupItem.text(0)) - groupItem.addChild(subgroupItem) - for alg in subgroup: - algItem = TreeAlgorithmItem(alg) - subgroupItem.addChild(algItem) - self.algorithmTree.addTopLevelItem(mainItem) - - for providerName in allAlgs.keys(): - groups = {} - provider = allAlgs[providerName] name = 'ACTIVATE_' + providerName.upper().replace(' ', '_') if not ProcessingConfig.getSetting(name): continue - if providerName not in providersToExclude: - continue - algs = provider.values() - - # Add algorithms - for alg in algs: - if not alg.showInModeler or alg.allowOnlyOpenedLayers: - continue - if text == '' or text.lower() in alg.name.lower(): - if alg.group in groups: - groupItem = groups[alg.group] - else: - groupItem = QTreeWidgetItem() - name = alg.i18n_group - groupItem.setText(0, name) - groupItem.setToolTip(0, name) - groups[alg.group] = groupItem - algItem = TreeAlgorithmItem(alg) - groupItem.addChild(algItem) - - if len(groups) > 0: - providerItem = QTreeWidgetItem() - providerItem.setText(0, - ModelerUtils.providers[providerName].getDescription()) - providerItem.setIcon(0, - ModelerUtils.providers[providerName].getIcon()) - providerItem.setToolTip(0, providerItem.text(0)) - for groupItem in groups.values(): - providerItem.addChild(groupItem) - self.algorithmTree.addTopLevelItem(providerItem) - providerItem.setExpanded(text != '') - for groupItem in groups.values(): - if text != '': - groupItem.setExpanded(True) - - def fillAlgorithmTreeUsingProviders(self): - self.algorithmTree.clear() - text = unicode(self.searchBox.text()) - allAlgs = ModelerUtils.allAlgs - for providerName in allAlgs.keys(): groups = {} provider = allAlgs[providerName] algs = provider.values() diff --git a/python/plugins/processing/tools/translation.py b/python/plugins/processing/tools/translation.py index ea95b1a7f986..e455242f6cf6 100644 --- a/python/plugins/processing/tools/translation.py +++ b/python/plugins/processing/tools/translation.py @@ -26,7 +26,7 @@ import os from processing.core.Processing import Processing from processing.gui.AlgorithmClassification import ( - loadClassification, loadDisplayNames, getClassificationEn, getDisplayNameEn) + loadClassification, getClassificationEn, getDisplayNameEn) def updateTranslations(): @@ -39,7 +39,6 @@ def updateTranslations(): """ loadClassification() - loadDisplayNames() f = open(os.path.join(os.path.dirname(__file__), '../algs/translations.py'), 'w') f.write('''# -*- coding: utf-8 -*- diff --git a/python/plugins/processing/ui/DlgAlgorithmBase.ui b/python/plugins/processing/ui/DlgAlgorithmBase.ui index bb6c046439a0..e76058e5a2bc 100644 --- a/python/plugins/processing/ui/DlgAlgorithmBase.ui +++ b/python/plugins/processing/ui/DlgAlgorithmBase.ui @@ -6,7 +6,7 @@ 0 0 - 745 + 841 525 @@ -15,69 +15,95 @@ - - - 0 - - - - Parameters - - - - 2 - - + + + + 0 - - - - - Log - - - - 2 - - - 0 - - - - - QFrame::NoFrame + + + Parameters + + + + 2 + + + 0 - - true + + + + + Log + + + + 2 - - - - - - - Help - - - - 2 + + 0 + + + + + QFrame::NoFrame + + + true + + + + + + + + Help + + + + 0 + + + 0 + + + + + + about:blank + + + + + + + + + + + + + 0 + 0 + - - 0 + + + 200 + 0 + - - - - - about:blank - - - - - - - + + + 300 + 16777215 + + + + + diff --git 
a/python/plugins/processing/ui/ProcessingToolbox.ui b/python/plugins/processing/ui/ProcessingToolbox.ui index b4df9f5fb5af..8505f4eea14d 100644 --- a/python/plugins/processing/ui/ProcessingToolbox.ui +++ b/python/plugins/processing/ui/ProcessingToolbox.ui @@ -16,7 +16,7 @@ - 3 + 0 0 @@ -44,7 +44,51 @@ - + + + + 16777215 + 50 + + + + background-color: rgb(255, 255, 127); + + + <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> +<html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">There are disabled providers that contain algorithms including your text string. Click </span><a href="view"><span style=" font-size:8pt; text-decoration: underline; color:#0000ff;">to view them.</span></a><span style=" font-size:8pt;"></span></p></body></html> + + + + + + + + 0 + 0 + + + + + 16777215 + 50 + + + + background-color: rgb(85, 170, 255); +color: rgb(255, 255, 255); + + + <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> +<html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">You can add more algorithms to the toolbox,</span><a href="enable"><span style=" font-size:8pt; text-decoration: underline; color:#0000ff;">enabling additional providers.</span></a></p></body></html> + +
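
A minimal sketch of the ACTIVATE_* setting convention the toolbox relies on in this patch: each provider's on/off state is stored under a key derived from its name, and the disabled-providers tip is shown whenever at least one of those keys is false. The dict-based settings store and the provider names below are stand-ins for ProcessingConfig.getSetting() and the real provider list.

# Sketch of the per-provider activation keys, assuming a plain dict as the
# settings store instead of ProcessingConfig.

def activation_key(provider_name):
    """Derive the activation setting key, e.g. 'ACTIVATE_SAGA'."""
    return 'ACTIVATE_' + provider_name.upper().replace(' ', '_')

def has_disabled_providers(provider_names, settings):
    """Return True if at least one provider is switched off in the settings."""
    return any(not settings.get(activation_key(name), True)
               for name in provider_names)

if __name__ == '__main__':
    settings = {'ACTIVATE_SAGA': False, 'ACTIVATE_GDALOGR': True}
    print(has_disabled_providers(['saga', 'gdalogr'], settings))  # True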
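
The widened search match in _filterItem can be illustrated in isolation: an algorithm item stays visible when the search text occurs in its display name, its command-line name, or its short help. The Alg class below is a stand-in for the real GeoAlgorithm objects, and the example algorithm is hypothetical.

# Sketch of the filter predicate; real items also inherit visibility from
# their parent group and provider items.

class Alg(object):
    def __init__(self, name, command_line_name, short_help=None):
        self.name = name
        self.command_line_name = command_line_name
        self.short_help = short_help

def matches(alg, text):
    """True if the algorithm should stay visible for the given search text."""
    text = text.strip().lower()
    if not text:
        return True
    haystacks = [alg.name.lower(), alg.command_line_name]
    if alg.short_help is not None:
        haystacks.append(alg.short_help.lower())
    return any(text in h for h in haystacks)

if __name__ == '__main__':
    alg = Alg('Polygon centroids', 'qgis:polygoncentroids',
              'Creates a point layer with the centroid of each input polygon.')
    print(matches(alg, 'centroid'))  # True
    print(matches(alg, 'buffer'))    # False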
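
The tip banners added to the toolbox are QTextBrowser widgets with setOpenLinks(False), so clicking the embedded link is delivered to a slot through anchorClicked instead of navigating away. A self-contained PyQt4 sketch of that wiring, using the same old-style SIGNAL syntax as the patch; the HTML text and the handler are placeholders.

# Sketch of the anchorClicked wiring used for the txtTip / txtDisabled banners.

import sys
from PyQt4.QtCore import SIGNAL
from PyQt4.QtGui import QApplication, QTextBrowser

def on_anchor_clicked(url):
    # In the toolbox this would open the settings dialog or reveal the
    # hidden provider items; here it only reports the clicked href.
    print('anchor clicked: %s' % url.toString())

if __name__ == '__main__':
    app = QApplication(sys.argv)
    tip = QTextBrowser()
    tip.setOpenLinks(False)  # keep the browser from loading the href itself
    tip.setHtml('Some providers are disabled. <a href="enable">Enable them.</a>')
    tip.connect(tip, SIGNAL("anchorClicked(const QUrl&)"), on_anchor_clicked)
    tip.show()
    sys.exit(app.exec_())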
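
For providers that are switched off, TreeProviderItem renders its first column through a QLabel installed with setItemWidget, so an inline "Activate" link can trigger activation via linkActivated. A standalone PyQt4 sketch under the assumption that the label text embeds an HTML anchor; the provider description, styling, and handler are placeholders.

# Sketch of an inactive-provider row with an inline activation link.

import sys
from PyQt4.QtGui import QApplication, QTreeWidget, QTreeWidgetItem, QLabel

def make_inactive_item(tree, description, on_activate):
    item = QTreeWidgetItem(tree)
    label = QLabel(description + '&nbsp;&nbsp;<a href="activate">Activate</a>')
    label.setStyleSheet("QLabel {background-color: white; color: grey;}")
    # linkActivated passes the href; ignore it and call the handler directly.
    label.linkActivated.connect(lambda href: on_activate())
    tree.setItemWidget(item, 0, label)
    return item

if __name__ == '__main__':
    app = QApplication(sys.argv)
    tree = QTreeWidget()
    make_inactive_item(tree, 'Example provider (disabled)',
                       lambda: tree.setWindowTitle('provider activated'))
    tree.show()
    sys.exit(app.exec_())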