This repository has been archived by the owner on Nov 5, 2021. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 203
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #86 from lexon-foundation/master
Implemented Lexon highlighting
- Loading branch information
Showing
4 changed files
with
286 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,14 @@ | ||
/*--------------------------------------------------------------------------------------------- | ||
* Copyright (c) Microsoft Corporation. All rights reserved. | ||
* Licensed under the MIT License. See License.txt in the project root for license information. | ||
*--------------------------------------------------------------------------------------------*/ | ||
'use strict'; | ||
|
||
import { registerLanguage } from '../_.contribution'; | ||
|
||
registerLanguage({ | ||
id: 'lexon', | ||
extensions: ['.lex'], | ||
aliases: ['Lexon'], | ||
loader: () => import('./lexon') | ||
}); |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,131 @@ | ||
/*--------------------------------------------------------------------------------------------- | ||
* Copyright (c) Microsoft Corporation. All rights reserved. | ||
* Licensed under the MIT License. See License.txt in the project root for license information. | ||
*--------------------------------------------------------------------------------------------*/ | ||
|
||
'use strict'; | ||
|
||
import { testTokenization } from '../test/testRunner'; | ||
|
||
testTokenization('lexon', [
	// Each entry feeds one line of a sample Lexon contract to the tokenizer
	// and asserts the token type starting at each index of that line.

	// 'LEX' keyword opens a contract header; the title words tokenize as
	// plain identifiers.
	[{
		line: 'LEX Paid Escrow',
		tokens: [
			{ startIndex: 0, type: 'keyword.lexon' },
			{ startIndex: 3, type: 'white.lexon' },
			{ startIndex: 4, type: 'identifier.lexon' },
			{ startIndex: 8, type: 'white.lexon' },
			{ startIndex: 9, type: 'identifier.lexon' },
		]
	}],

	// 'LEXON:' version pragma — the dotted version string is a single
	// 'number.semver' token.
	[{
		line: 'LEXON: 0.2.20',
		tokens: [
			{ startIndex: 0, type: 'keyword.lexon' },
			{ startIndex: 5, type: 'delimiter.lexon' },
			{ startIndex: 6, type: 'white.lexon' },
			{ startIndex: 7, type: 'number.semver.lexon' },
		]
	}],

	// A COMMENT line is swallowed whole as one comment token.
	[{
		line: 'COMMENT: 3.f - an escrow that is controlled by a third party for a fee.',
		tokens: [
			{ startIndex: 0, type: 'comment.lexon' },
		]
	}],

	// Quoted name declaration: the quotes tokenize separately
	// ('identifier.quote') from the enclosed identifier; 'is' is an
	// operator and 'person' a type keyword.
	[{
		line: '"Payer" is a person.',
		tokens: [
			{ startIndex: 0, type: 'identifier.quote.lexon' },
			{ startIndex: 1, type: 'identifier.lexon' },
			{ startIndex: 6, type: 'identifier.quote.lexon' },
			{ startIndex: 7, type: 'white.lexon' },
			{ startIndex: 8, type: 'operator.lexon' },
			{ startIndex: 10, type: 'white.lexon' },
			{ startIndex: 11, type: 'identifier.lexon' },
			{ startIndex: 12, type: 'white.lexon' },
			{ startIndex: 13, type: 'keyword.type.lexon' },
			{ startIndex: 19, type: 'delimiter.lexon' },
		]
	}],

	// Same declaration shape with the 'amount' type keyword.
	[{
		line: '"Fee" is an amount.',
		tokens: [
			{ startIndex: 0, type: 'identifier.quote.lexon' },
			{ startIndex: 1, type: 'identifier.lexon' },
			{ startIndex: 4, type: 'identifier.quote.lexon' },
			{ startIndex: 5, type: 'white.lexon' },
			{ startIndex: 6, type: 'operator.lexon' },
			{ startIndex: 8, type: 'white.lexon' },
			{ startIndex: 9, type: 'identifier.lexon' },
			{ startIndex: 11, type: 'white.lexon' },
			{ startIndex: 12, type: 'keyword.type.lexon' },
			{ startIndex: 18, type: 'delimiter.lexon' },
		]
	}],

	// Prose-style statement: verbs from the keyword list ('pays', 'into')
	// and type words ('Amount') are highlighted; filler words stay identifiers.
	[{
		line: 'The Payer pays an Amount into escrow,',
		tokens: [
			{ startIndex: 0, type: 'identifier.lexon' }, // The
			{ startIndex: 3, type: 'white.lexon' },
			{ startIndex: 4, type: 'identifier.lexon' }, // Payer
			{ startIndex: 9, type: 'white.lexon' },
			{ startIndex: 10, type: 'keyword.lexon' }, // pays
			{ startIndex: 14, type: 'white.lexon' },
			{ startIndex: 15, type: 'identifier.lexon' }, // an
			{ startIndex: 17, type: 'white.lexon' },
			{ startIndex: 18, type: 'keyword.type.lexon' }, // Amount
			{ startIndex: 24, type: 'white.lexon' },
			{ startIndex: 25, type: 'keyword.lexon' }, // into
			{ startIndex: 29, type: 'white.lexon' },
			{ startIndex: 30, type: 'identifier.lexon' }, // escrow
			{ startIndex: 36, type: 'delimiter.lexon' }, // ,
		]
	}],

	// Continuation line beginning with the keyword 'appoints'.
	[{
		line: 'appoints the Payee,',
		tokens: [
			{ startIndex: 0, type: 'keyword.lexon' }, // appoints
			{ startIndex: 8, type: 'white.lexon' },
			{ startIndex: 9, type: 'identifier.lexon' }, // the
			{ startIndex: 12, type: 'white.lexon' },
			{ startIndex: 13, type: 'identifier.lexon' }, // Payee
			{ startIndex: 18, type: 'delimiter.lexon' }, // ,
		]
	}],

	// 'and' is in the operators list; the rest are plain identifiers.
	[{
		line: 'and also fixes the Fee.',
		tokens: [
			{ startIndex: 0, type: 'operator.lexon' }, // and
			{ startIndex: 3, type: 'white.lexon' },
			{ startIndex: 4, type: 'identifier.lexon' }, // also
			{ startIndex: 8, type: 'white.lexon' },
			{ startIndex: 9, type: 'identifier.lexon' }, // fixes
			{ startIndex: 14, type: 'white.lexon' },
			{ startIndex: 15, type: 'identifier.lexon' }, // the
			{ startIndex: 18, type: 'white.lexon' },
			{ startIndex: 19, type: 'identifier.lexon' }, // Fee
			{ startIndex: 22, type: 'delimiter.lexon' }, // .
		]
	}],

	// 'CLAUSE:' opens a clause; everything up to the closing '.' is one
	// identifier token ("Pay Out"), including the embedded space.
	[{
		line: 'CLAUSE: Pay Out.',
		tokens: [
			{ startIndex: 0, type: 'keyword.lexon' }, // CLAUSE
			{ startIndex: 6, type: 'delimiter.lexon' }, // :
			{ startIndex: 7, type: 'white.lexon' },
			{ startIndex: 8, type: 'identifier.lexon' }, // Pay Out
			{ startIndex: 15, type: 'delimiter.lexon' }, // .
		]
	}],
]);
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,140 @@ | ||
/*--------------------------------------------------------------------------------------------- | ||
* Copyright (c) Microsoft Corporation. All rights reserved. | ||
* Licensed under the MIT License. See License.txt in the project root for license information. | ||
*--------------------------------------------------------------------------------------------*/ | ||
|
||
'use strict'; | ||
|
||
import IRichLanguageConfiguration = monaco.languages.LanguageConfiguration; | ||
import ILanguage = monaco.languages.IMonarchLanguage; | ||
|
||
// Editor-side language configuration for Lexon: comment toggling,
// bracket matching, auto-closing/surrounding pairs, and folding markers.
export const conf: IRichLanguageConfiguration = {
	comments: {
		// Lexon comments are lines introduced by the COMMENT keyword;
		// there is no true line-comment prefix character.
		lineComment: 'COMMENT',
		// blockComment: ['COMMENT', '.'],
	},
	brackets: [
		['(', ')']
	],
	autoClosingPairs: [
		{ open: '{', close: '}' },
		{ open: '[', close: ']' },
		{ open: '(', close: ')' },
		{ open: '"', close: '"', },
		// Lexon clause headers open with ':' and the statement ends with '.'
		// (mirrors the ':' ... '.' handling in the tokenizer below).
		{ open: ':', close: '.', },
	],
	surroundingPairs: [
		{ open: '{', close: '}' },
		{ open: '[', close: ']' },
		{ open: '(', close: ')' },
		{ open: '`', close: '`' },
		{ open: '"', close: '"' },
		{ open: '\'', close: '\'' },
		{ open: ':', close: '.', },
	],
	folding: {
		// Region markers may appear after a '::' prefix or a COMMENT keyword,
		// e.g. "COMMENT #region" ... "COMMENT #endregion".
		markers: {
			start: new RegExp("^\\s*(::\\s*|COMMENT\\s+)#region"),
			end: new RegExp("^\\s*(::\\s*|COMMENT\\s+)#endregion")
		}
	}
};
|
||
// Monarch tokenizer definition for Lexon. NOTE: within each tokenizer state
// the rules are tried in order, so rule order below is load-bearing.
export const language = <ILanguage>{
	// Set defaultToken to invalid to see what you do not tokenize yet
	// defaultToken: 'invalid',
	tokenPostfix: '.lexon',
	// Lexon keywords are matched case-insensitively ('LEX', 'lex', 'Lex' ...).
	ignoreCase: true,

	keywords: [
		'lexon', 'lex', 'clause', 'terms', 'contracts', 'may', 'pay',
		'pays', 'appoints', 'into', 'to'
	],

	// Built-in data types, highlighted as 'keyword.type'.
	typeKeywords: [
		'amount', 'person', 'key', 'time', 'date', 'asset', 'text'
	],

	// English-word operators (comparisons, arithmetic, assignment-like verbs).
	operators: [
		'less', 'greater', 'equal', 'le', 'gt', 'or', 'and',
		'add', 'added', 'subtract', 'subtracted', 'multiply', 'multiplied', 'times', 'divide', 'divided',
		'is', 'be', 'certified'
	],

	// we include these common regular expressions
	symbols: /[=><!~?:&|+\-*\/\^%]+/,


	// The main tokenizer for our languages
	tokenizer: {
		root: [
			// comment — a whole line starting with 'comment' (optionally ':')
			// is one comment token; group 1 (leading whitespace) gets no token.
			[/^(\s*)(comment:?(?:\s.*|))$/, ['', 'comment']],

			// special identifier cases
			// Quoted names like "Payer": emit the quote, then switch state
			// so the enclosed text tokenizes as a plain identifier.
			[/"/, { token: 'identifier.quote', bracket: '@open', next: '@quoted_identifier' }],
			// 'LEX' at end of line / 'LEXON' start multi-token constructs
			// that run until a closing '.' or a semver number respectively.
			// (String rules are treated as regular expressions by Monarch.)
			['LEX$', { token: 'keyword', bracket: '@open', next: '@identifier_until_period' }],
			['LEXON', { token: 'keyword', bracket: '@open', next: '@semver' }],
			// A bare ':' (e.g. after CLAUSE) introduces a free-form name that
			// runs until the terminating '.'.
			[':', { token: 'delimiter', bracket: '@open', next: '@identifier_until_period' }],

			// identifiers and keywords — classified against the word lists
			// above; operators are checked first, then types, then keywords.
			[/[a-z_$][\w$]*/, {
				cases: {
					'@operators': 'operator',
					'@typeKeywords': 'keyword.type',
					'@keywords': 'keyword',
					'@default': 'identifier'
				}
			}],

			// whitespace
			{ include: '@whitespace' },

			// delimiters and operators
			[/[{}()\[\]]/, '@brackets'],
			[/[<>](?!@symbols)/, '@brackets'],
			[/@symbols/, 'delimiter'],

			// numbers — semver (x.y.z) must precede the float rule so
			// '0.2.20' is not split into two floats.
			[/\d*\.\d*\.\d*/, 'number.semver'],
			[/\d*\.\d+([eE][\-+]?\d+)?/, 'number.float'],
			[/0[xX][0-9a-fA-F]+/, 'number.hex'],
			[/\d+/, 'number'],

			// delimiter: after number because of .\d floats
			[/[;,.]/, 'delimiter'],
		],

		// Inside a "..." name: everything up to the closing quote is one
		// identifier token.
		quoted_identifier: [
			[/[^\\"]+/, 'identifier'],
			[/"/, { token: 'identifier.quote', bracket: '@close', next: '@pop' }]
		],

		// NOTE(review): this state appears to be unreferenced — no rule uses
		// 'next: @space_identifier_until_period'. Candidate for removal;
		// verify against the full repository before deleting.
		space_identifier_until_period: [
			[':', 'delimiter'],
			[' ', { token: 'white', next: '@identifier_rest' }],
		],

		// After 'LEX' or ':' — consume an optional ':' then free text
		// (spaces included) as one identifier until the terminating '.'.
		identifier_until_period: [
			{ include: '@whitespace' },
			[':', { token: 'delimiter', next: '@identifier_rest' }],
			[/[^\\.]+/, 'identifier'],
			[/\./, { token: 'delimiter', bracket: '@close', next: '@pop' }]
		],

		// Free text up to the terminating '.' (no further ':' handling).
		identifier_rest: [
			[/[^\\.]+/, 'identifier'],
			[/\./, { token: 'delimiter', bracket: '@close', next: '@pop' }]
		],

		// After 'LEXON' — skip ':' and whitespace, then consume the version
		// number (e.g. 0.2.20) and return to root.
		semver: [
			{ include: '@whitespace' },
			[':', 'delimiter'],
			[/\d*\.\d*\.\d*/, { token: 'number.semver', bracket: '@close', next: '@pop' }]
		],

		whitespace: [
			[/[ \t\r\n]+/, 'white'],
		],
	},
};
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters