All files / languages language-server.ts

100% Statements 106/106
89.47% Branches 51/57
100% Functions 20/20
100% Lines 104/104

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423                                                                                                                            3x                     132x   132x   132x   132x   132x   132x         132x                             132x                 132x 132x 132x 132x 132x   132x 132x 132x 132x   132x 132x       14x   14x 13x   1x         1x   1x   1x       3x       3x       3x       3x           3x           3x 
2x             3x       2x   1x     2x 1x 1x           3x   3x   2x   1x       3x       1x         1x           1x                         2x 1x     1x   1x   1x                     4x 1x     3x       3x 3x   3x   3x 2x       2x   1x   1x             1x                     3x       3x   1x       1x                                               3x 5x   2x                       5x   5x   3x   3x 3x       3x             3x 1x         2x         2x 1x 1x   1x     2x           3x           63x 63x 63x   63x 315x     63x 215x 215x     215x 11x   11x 11x       215x 1075x       63x 315x     63x      
import { IParser, ILexer, IToken, IRecognitionException } from '@faubulous/mentor-rdf-parsers';
import {
	Connection,
	Diagnostic,
	DiagnosticSeverity,
	DidChangeConfigurationNotification,
	DidChangeConfigurationParams,
	DidChangeWatchedFilesParams,
	InitializeParams,
	InitializeResult,
	PublishDiagnosticsParams,
	Range,
	TextDocumentChangeEvent,
	TextDocuments,
	TextDocumentSyncKind
} from 'vscode-languageserver/browser';
import { TextDocument } from 'vscode-languageserver-textdocument';
import { LintDiagnosticsContext } from './linter-context';
import { Linter } from './linter';
import {
	DeprecatedWorkspaceUriLinter,
	InlineSingleUseBlankNodesLinter,
	NamespacePrefixLinter,
	XsdAnyUriLiteralLinter,
	XsdDatatypeValidationLinter,
} from './linters';
import {
	getNamespaceDefinition,
	PrefixMap,
} from '@src/utilities';
 
/**
 * The result of tokenizing a text document.
 */
export interface TokenizationResults {
	// Recognition exceptions thrown during parsing; includes both lexing and parsing errors.
	errors: IRecognitionException[];
	// The token stream produced by the lexer.
	tokens: IToken[];
}
 
/**
 * Validation results for a text document. Extends the LSP diagnostics payload
 * with the token stream so clients can reuse it without re-tokenizing.
 */
export interface ValidationResults extends PublishDiagnosticsParams {
	/**
	 * Tokens produced by the parser.
	 */
	tokens?: IToken[];
}
 
/**
 * Parser settings for a text document.
 */
interface ParserSettings {
	/**
	 * The maximum number of problems to report.
	 */
	maxNumberOfProblems: number;
}
 
/**
 * Default parser settings, used when the client does not support the
 * `workspace/configuration` request or has not supplied settings.
 */
const defaultSettings: ParserSettings = {
	maxNumberOfProblems: 1000
};
 
export class LanguageServerBase {
	readonly languageName: string;
 
	readonly languageId: string;
 
	readonly connection: Connection;
 
	readonly documents: TextDocuments<TextDocument> = new TextDocuments(TextDocument);
 
	readonly documentSettings: Map<string, Thenable<ParserSettings>> = new Map();
 
	hasConfigurationCapability = false;
 
	hasWorkspaceFolderCapability = false;
 
	hasDiagnosticRelatedInformationCapability = false;
 
	globalSettings: ParserSettings = defaultSettings;
 
	/**
	 * Indicates whether the language server should provide tokens for the document to the client via 'mentor.message.updateContext'.
	 */
	isRdfTokenProvider = false;
 
	/**
	 * The lexer used to tokenize the document. This is used to provide lexing diagnostics, but not for validating the document for errors, since that requires fully parsing it.
	 */
	lexer?: ILexer;
 
	/**
	 * The parser used to tokenize and validate the document.
	 */
	parser?: IParser;
 
	/**
	 * Pluggable lint rules that produce additional diagnostics from parsed tokens.
	 */
	readonly linters: Linter[] = [
		new DeprecatedWorkspaceUriLinter(),
		new InlineSingleUseBlankNodesLinter(),
		new NamespacePrefixLinter(),
		new XsdAnyUriLiteralLinter(),
		new XsdDatatypeValidationLinter(),
	];
 
	constructor(connection: Connection, langaugeId: string, languageName: string, lexer?: ILexer, parser?: IParser, isRdfTokenProvider = false) {
		this.languageName = languageName;
		this.languageId = langaugeId;
		this.lexer = lexer;
		this.parser = parser;
		this.isRdfTokenProvider = isRdfTokenProvider;
 
		this.connection = connection;
		this.connection.onInitialize(this.onInitializeConnection.bind(this));
		this.connection.onInitialized(this.onConnectionInitialized.bind(this));
		this.connection.onDidChangeConfiguration(this.onDidChangeConfiguration.bind(this));
 
		this.documents.onDidClose(this.onDidClose.bind(this));
		this.documents.onDidChangeContent(this.onDidChangeContent.bind(this));
	}
 
	protected log(message: string) {
		const msg = `[Server] ${message}`;
 
		if (this.connection.console) {
			this.connection.console.log(msg);
		} else {
			console.log(msg);
		}
	}
 
	start() {
		this.documents.listen(this.connection);
 
		this.connection.listen();
 
		this.log(`Started ${this.languageName} Language Server.`);
	}
 
	protected onInitializeConnection(params: InitializeParams) {
		const capabilities = params.capabilities;
 
		// Does the client support the `workspace/configuration` request?
		// If not, we fall back using global settings.
		this.hasConfigurationCapability = !!(
			capabilities.workspace && !!capabilities.workspace.configuration
		);
 
		this.hasWorkspaceFolderCapability = !!(
			capabilities.workspace && !!capabilities.workspace.workspaceFolders
		);
 
		this.hasDiagnosticRelatedInformationCapability = !!(
			capabilities.textDocument &&
			capabilities.textDocument.publishDiagnostics &&
			capabilities.textDocument.publishDiagnostics.relatedInformation
		);
 
		const result: InitializeResult = {
			capabilities: {
				textDocumentSync: TextDocumentSyncKind.Incremental
			}
		};
 
		if (this.hasWorkspaceFolderCapability) {
			result.capabilities.workspace = {
				workspaceFolders: {
					supported: true
				}
			};
		}
 
		return result;
	}
 
	protected onConnectionInitialized() {
		if (this.hasConfigurationCapability) {
			// Register for all configuration changes.
			this.connection.client.register(DidChangeConfigurationNotification.type, undefined);
		}
 
		if (this.hasWorkspaceFolderCapability) {
			this.connection.workspace.onDidChangeWorkspaceFolders(_event => {
				this.log('Workspace folder change event received.');
			});
		}
	}
 
	protected onDidChangeConfiguration(change: DidChangeConfigurationParams) {
		this.log(`Configuration changed.`);
 
		if (this.hasConfigurationCapability) {
			// Reset all cached document settings.
			this.documentSettings.clear();
		} else {
			this.globalSettings = <ParserSettings>((change.settings.languageServerExample || defaultSettings));
		}
 
		// Revalidate all open text documents.
		this.documents.all().forEach(doc => this.validateTextDocument(doc));
	}
 
	protected onDidChangeWatchedFiles(change: DidChangeWatchedFilesParams) {
		this.log(`Watched files changed.`);
	}
 
	protected onDidClose(e: TextDocumentChangeEvent<TextDocument>) {
		// Only keep settings for open documents.
		this.documentSettings.delete(e.document.uri);
	}
 
	protected onDidChangeContent(change: TextDocumentChangeEvent<TextDocument>) {
		// The content of a text document has changed. This event is emitted
		// when the text document first opened or when its content has changed.
		this.validateTextDocument(change.document);
	}
 
	/**
	 * Parses the content of a document and returns any recognition exceptions that 
	 * were thrown during parsing, which include both lexing and parsing errors. Note 
	 * that this requires fully parsing the document, including building the CST, 
	 * which can be a potentially expensive operation for large documents. Therefore, 
	 * we only do this in the language server and send the result to the client to not block the UI.
	 * @param content 
	 * @returns 
	 */
	protected async parse(content: string): Promise<TokenizationResults> {
		if (!this.lexer || !this.parser) {
			throw new Error('Lexer and parser are required for tokenization.');
		}
 
		const lexResult = this.lexer.tokenize(content);
 
		this.parser.parse(lexResult.tokens, false);
 
		return {
			tokens: lexResult.tokens,
			errors: [
				...this.parser.errors,
				...this.parser.semanticErrors
			],
		};
	}
 
	async validateTextDocument(document: TextDocument): Promise<void> {
		// The connection may not yet be initialized.
		if (!this?.connection) {
			return;
		}
 
		this.log(`Validating document: ${document.uri}`);
 
		// In this simple example we get the settings for every validate run.
		// const settings = await this._getDocumentSettings(document.uri);
		let diagnostics: Diagnostic[] = [];
		let tokens: IToken[] = [];
 
		const content = document.getText();
 
		if (content.length) {
			try {
				// Validating the document for errors requires fully parsing it, including building the CST.
				// Since this is a potentially expensive operation, we only do it in the language server and
				// send the result to the client to not block the UI..
				const result = await this.parse(content);
 
				tokens = result.tokens;
 
				diagnostics = [
					...this.getLexDiagnostics(document, result.tokens),
					...this.getParseDiagnostics(document, result.errors),
					...this.getLintDiagnostics(document, content, result.tokens)
				];
			}
			catch (e) {
				diagnostics = [
					{
						severity: DiagnosticSeverity.Error,
						message: e ? e.toString() : "An error occurred while parsing the document.",
						range: Range.create(0, 0, 0, 0)
					}
				];
			}
		}
 
		// Send the computed diagnostics to the client.
		this.connection.sendDiagnostics({ uri: document.uri, diagnostics });
 
		// Always send token notification to unblock the client, even for empty files or parsing errors.
		// The client needs this to resolve pending token requests and avoid timeout errors.
		if (this.isRdfTokenProvider) {
			// This sends the tokens to the client so that they can be used to build a reference index.
			this.connection.sendNotification('mentor.message.updateContext', {
				uri: document.uri,
				languageId: this.languageId,
				// Important: We need to clone the tokens so that they can be processed by strucutredClone() of the underlying message channel.
				tokens: tokens.map(t => ({
					image: t.image,
					startOffset: t.startOffset,
					endOffset: t.endOffset,
					startLine: t.startLine,
					endLine: t.endLine,
					startColumn: t.startColumn,
					endColumn: t.endColumn,
					tokenTypeIdx: t.tokenTypeIdx,
					tokenType: {
						name: t.tokenType?.name ?? '',
						tokenName: t.tokenType?.name,
						GROUP: t.tokenType?.GROUP,
					},
					// TODO: Define the interface / or clone method in mentor-rdf and use this token interface instead of the Chevrotain IToken to avoid having to clone the tokens here. --- IGNORE ---
					payload: {
						...t.payload
					}
				}))
			});
		}
	}
 
	protected getLexDiagnostics(document: TextDocument, tokens: IToken[]) {
		return tokens
			.filter((t) => t?.tokenType?.name === 'Unknown')
			.map(
				(unknownToken): Diagnostic => ({
					severity: DiagnosticSeverity.Error,
					message: `Unknown token`,
					range: {
						start: document.positionAt(unknownToken.startOffset),
						end: document.positionAt((unknownToken.endOffset ?? unknownToken.startOffset) + 1),
					},
				})
			);
	}
 
	protected getParseDiagnostics(document: TextDocument, errors: IRecognitionException[]) {
		const content = document.getText();
 
		return errors.map(
			(error): Diagnostic => {
				const { message, name, context, token } = error;
 
				const ruleStack = context ? context.ruleStack : null;
				const source = ruleStack && ruleStack.length > 0
					? ruleStack[ruleStack.length - 1]
					: undefined;
 
				const constructedDiagnostic: Partial<Diagnostic> = {
					code: name,
					message,
					source,
					severity: DiagnosticSeverity.Error,
				};
 
				if (token.tokenType?.name !== 'EOF') {
					constructedDiagnostic.range = Range.create(
						document.positionAt(token.startOffset),
						document.positionAt((token.endOffset ?? token.startOffset) + 1)
					);
				} else {
					const { previousToken = {} } = error as any; // chevrotain doesn't have this typed fully, but it exists for early exit exceptions
 
					let rangeStart;
					let rangeEnd;
 
					if (typeof previousToken.endOffset !== 'undefined') {
						rangeStart = Math.min(previousToken.endOffset + 1, content.length);
						rangeEnd = Math.min(previousToken.endOffset + 2, content.length);
					} else {
						rangeStart = rangeEnd = content.length;
					}
 
					constructedDiagnostic.range = Range.create(
						document.positionAt(rangeStart),
						document.positionAt(rangeEnd)
					);
				}
 
				return constructedDiagnostic as Diagnostic;
			}
		);
	}
 
	protected getLintDiagnostics(document: TextDocument, content: string, tokens: IToken[]): Diagnostic[] {
		const prefixes: PrefixMap = {};
		const context: LintDiagnosticsContext = { document, content, tokens, prefixes };
		const result: Diagnostic[] = [];
 
		for (const linter of this.linters) {
			linter.reset?.();
		}
 
		for (let i = 0; i < tokens.length; i++) {
			const token = tokens[i];
			const type = token.tokenType?.name;
 
			// Keep the shared prefix map current so IRI-resolving providers see up-to-date prefixes.
			if (type === 'PREFIX' || type === 'TTL_PREFIX') {
				const ns = getNamespaceDefinition(tokens, token);
 
				Eif (ns) {
					prefixes[ns.prefix] = ns.uri;
				}
			}
 
			for (const linter of this.linters) {
				result.push(...linter.visitToken(context, token, i));
			}
		}
 
		for (const linter of this.linters) {
			result.push(...(linter.finalize?.(context) ?? []));
		}
 
		return result;
	}
}