gdscript_tokenizer.cpp

  1. /**************************************************************************/
  2. /* gdscript_tokenizer.cpp */
  3. /**************************************************************************/
  4. /* This file is part of: */
  5. /* GODOT ENGINE */
  6. /* https://godotengine.org */
  7. /**************************************************************************/
  8. /* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
  9. /* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
  10. /* */
  11. /* Permission is hereby granted, free of charge, to any person obtaining */
  12. /* a copy of this software and associated documentation files (the */
  13. /* "Software"), to deal in the Software without restriction, including */
  14. /* without limitation the rights to use, copy, modify, merge, publish, */
  15. /* distribute, sublicense, and/or sell copies of the Software, and to */
  16. /* permit persons to whom the Software is furnished to do so, subject to */
  17. /* the following conditions: */
  18. /* */
  19. /* The above copyright notice and this permission notice shall be */
  20. /* included in all copies or substantial portions of the Software. */
  21. /* */
  22. /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
  23. /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
  24. /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
  25. /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
  26. /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
  27. /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
  28. /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
  29. /**************************************************************************/
  30. #include "gdscript_tokenizer.h"
  31. #include "core/error/error_macros.h"
  32. #include "core/string/char_utils.h"
  33. #ifdef DEBUG_ENABLED
  34. #include "servers/text_server.h"
  35. #endif
  36. #ifdef TOOLS_ENABLED
  37. #include "editor/editor_settings.h"
  38. #endif
  39. static const char *token_names[] = {
  40. "Empty", // EMPTY,
  41. // Basic
  42. "Annotation", // ANNOTATION
  43. "Identifier", // IDENTIFIER,
  44. "Literal", // LITERAL,
  45. // Comparison
  46. "<", // LESS,
  47. "<=", // LESS_EQUAL,
  48. ">", // GREATER,
  49. ">=", // GREATER_EQUAL,
  50. "==", // EQUAL_EQUAL,
  51. "!=", // BANG_EQUAL,
  52. // Logical
  53. "and", // AND,
  54. "or", // OR,
  55. "not", // NOT,
  56. "&&", // AMPERSAND_AMPERSAND,
  57. "||", // PIPE_PIPE,
  58. "!", // BANG,
  59. // Bitwise
  60. "&", // AMPERSAND,
  61. "|", // PIPE,
  62. "~", // TILDE,
  63. "^", // CARET,
  64. "<<", // LESS_LESS,
  65. ">>", // GREATER_GREATER,
  66. // Math
  67. "+", // PLUS,
  68. "-", // MINUS,
  69. "*", // STAR,
  70. "**", // STAR_STAR,
  71. "/", // SLASH,
  72. "%", // PERCENT,
  73. // Assignment
  74. "=", // EQUAL,
  75. "+=", // PLUS_EQUAL,
  76. "-=", // MINUS_EQUAL,
  77. "*=", // STAR_EQUAL,
  78. "**=", // STAR_STAR_EQUAL,
  79. "/=", // SLASH_EQUAL,
  80. "%=", // PERCENT_EQUAL,
  81. "<<=", // LESS_LESS_EQUAL,
  82. ">>=", // GREATER_GREATER_EQUAL,
  83. "&=", // AMPERSAND_EQUAL,
  84. "|=", // PIPE_EQUAL,
  85. "^=", // CARET_EQUAL,
  86. // Control flow
  87. "if", // IF,
  88. "elif", // ELIF,
  89. "else", // ELSE,
  90. "for", // FOR,
  91. "while", // WHILE,
  92. "break", // BREAK,
  93. "continue", // CONTINUE,
  94. "pass", // PASS,
  95. "return", // RETURN,
  96. "match", // MATCH,
  97. "when", // WHEN,
  98. // Keywords
  99. "as", // AS,
  100. "assert", // ASSERT,
  101. "await", // AWAIT,
  102. "breakpoint", // BREAKPOINT,
  103. "class", // CLASS,
  104. "class_name", // CLASS_NAME,
  105. "const", // CONST,
  106. "enum", // ENUM,
  107. "extends", // EXTENDS,
  108. "func", // FUNC,
  109. "in", // IN,
  110. "is", // IS,
  111. "namespace", // NAMESPACE
  112. "preload", // PRELOAD,
  113. "self", // SELF,
  114. "signal", // SIGNAL,
  115. "static", // STATIC,
  116. "super", // SUPER,
  117. "trait", // TRAIT,
  118. "var", // VAR,
  119. "void", // VOID,
  120. "yield", // YIELD,
  121. // Punctuation
  122. "[", // BRACKET_OPEN,
  123. "]", // BRACKET_CLOSE,
  124. "{", // BRACE_OPEN,
  125. "}", // BRACE_CLOSE,
  126. "(", // PARENTHESIS_OPEN,
  127. ")", // PARENTHESIS_CLOSE,
  128. ",", // COMMA,
  129. ";", // SEMICOLON,
  130. ".", // PERIOD,
  131. "..", // PERIOD_PERIOD,
  132. ":", // COLON,
  133. "$", // DOLLAR,
  134. "->", // FORWARD_ARROW,
  135. "_", // UNDERSCORE,
  136. // Whitespace
  137. "Newline", // NEWLINE,
  138. "Indent", // INDENT,
  139. "Dedent", // DEDENT,
  140. // Constants
  141. "PI", // CONST_PI,
  142. "TAU", // CONST_TAU,
  143. "INF", // CONST_INF,
  144. "NaN", // CONST_NAN,
  145. // Error message improvement
  146. "VCS conflict marker", // VCS_CONFLICT_MARKER,
  147. "`", // BACKTICK,
  148. "?", // QUESTION_MARK,
  149. // Special
  150. "Error", // ERROR,
  151. "End of file", // EOF,
  152. };
  153. // Avoid desync.
  154. static_assert(sizeof(token_names) / sizeof(token_names[0]) == GDScriptTokenizer::Token::TK_MAX, "Amount of token names doesn't match the amount of token types.");
  155. const char *GDScriptTokenizer::Token::get_name() const {
  156. ERR_FAIL_INDEX_V_MSG(type, TK_MAX, "<error>", "Using token type out of the enum.");
  157. return token_names[type];
  158. }
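// Returns true if this token can appear as the left operand of a binary operator.
// scan() uses this to decide whether a "+" or "-" followed by a digit starts a signed number literal or acts as a binary operator.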
  159. bool GDScriptTokenizer::Token::can_precede_bin_op() const {
  160. switch (type) {
  161. case IDENTIFIER:
  162. case LITERAL:
  163. case SELF:
  164. case BRACKET_CLOSE:
  165. case BRACE_CLOSE:
  166. case PARENTHESIS_CLOSE:
  167. case CONST_PI:
  168. case CONST_TAU:
  169. case CONST_INF:
  170. case CONST_NAN:
  171. return true;
  172. default:
  173. return false;
  174. }
  175. }
  176. bool GDScriptTokenizer::Token::is_identifier() const {
  177. // Note: Most keywords should not be recognized as identifiers.
  178. // The only exceptions are names that are already part of the engine's API.
  179. switch (type) {
  180. case IDENTIFIER:
  181. case MATCH: // Used in String.match().
  182. case WHEN: // New keyword, avoid breaking existing code.
  183. // Allow constants to be treated as regular identifiers.
  184. case CONST_PI:
  185. case CONST_INF:
  186. case CONST_NAN:
  187. case CONST_TAU:
  188. return true;
  189. default:
  190. return false;
  191. }
  192. }
  193. bool GDScriptTokenizer::Token::is_node_name() const {
  194. // This is meant to allow keywords with the $ notation, but not as general identifiers.
  195. switch (type) {
  196. case IDENTIFIER:
  197. case AND:
  198. case AS:
  199. case ASSERT:
  200. case AWAIT:
  201. case BREAK:
  202. case BREAKPOINT:
  203. case CLASS_NAME:
  204. case CLASS:
  205. case CONST:
  206. case CONST_PI:
  207. case CONST_INF:
  208. case CONST_NAN:
  209. case CONST_TAU:
  210. case CONTINUE:
  211. case ELIF:
  212. case ELSE:
  213. case ENUM:
  214. case EXTENDS:
  215. case FOR:
  216. case FUNC:
  217. case IF:
  218. case IN:
  219. case IS:
  220. case MATCH:
  221. case NAMESPACE:
  222. case NOT:
  223. case OR:
  224. case PASS:
  225. case PRELOAD:
  226. case RETURN:
  227. case SELF:
  228. case SIGNAL:
  229. case STATIC:
  230. case SUPER:
  231. case TRAIT:
  232. case UNDERSCORE:
  233. case VAR:
  234. case VOID:
  235. case WHILE:
  236. case WHEN:
  237. case YIELD:
  238. return true;
  239. default:
  240. return false;
  241. }
  242. }
  243. String GDScriptTokenizer::get_token_name(Token::Type p_token_type) {
  244. ERR_FAIL_INDEX_V_MSG(p_token_type, Token::TK_MAX, "<error>", "Using token type out of the enum.");
  245. return token_names[p_token_type];
  246. }
  247. void GDScriptTokenizerText::set_source_code(const String &p_source_code) {
  248. source = p_source_code;
  249. if (source.is_empty()) {
  250. _source = U"";
  251. } else {
  252. _source = source.ptr();
  253. }
  254. _current = _source;
  255. line = 1;
  256. column = 1;
  257. length = p_source_code.length();
  258. position = 0;
  259. }
  260. void GDScriptTokenizerText::set_cursor_position(int p_line, int p_column) {
  261. cursor_line = p_line;
  262. cursor_column = p_column;
  263. }
  264. void GDScriptTokenizerText::set_multiline_mode(bool p_state) {
  265. multiline_mode = p_state;
  266. }
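// The following pair saves and restores the indentation stack, so an indented block can be tokenized inside an expression without disturbing the enclosing indentation.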
  267. void GDScriptTokenizerText::push_expression_indented_block() {
  268. indent_stack_stack.push_back(indent_stack);
  269. }
  270. void GDScriptTokenizerText::pop_expression_indented_block() {
  271. ERR_FAIL_COND(indent_stack_stack.is_empty());
  272. indent_stack = indent_stack_stack.back()->get();
  273. indent_stack_stack.pop_back();
  274. }
  275. int GDScriptTokenizerText::get_cursor_line() const {
  276. return cursor_line;
  277. }
  278. int GDScriptTokenizerText::get_cursor_column() const {
  279. return cursor_column;
  280. }
  281. bool GDScriptTokenizerText::is_past_cursor() const {
  282. if (line < cursor_line) {
  283. return false;
  284. }
  285. if (line > cursor_line) {
  286. return true;
  287. }
  288. if (column < cursor_column) {
  289. return false;
  290. }
  291. return true;
  292. }
  293. char32_t GDScriptTokenizerText::_advance() {
  294. if (unlikely(_is_at_end())) {
  295. return '\0';
  296. }
  297. _current++;
  298. column++;
  299. position++;
  300. if (column > rightmost_column) {
  301. rightmost_column = column;
  302. }
  303. if (unlikely(_is_at_end())) {
  304. // Add extra newline even if it's not there, to satisfy the parser.
  305. newline(true);
  306. // Also add needed unindent.
  307. check_indent();
  308. }
  309. return _peek(-1);
  310. }
  311. void GDScriptTokenizerText::push_paren(char32_t p_char) {
  312. paren_stack.push_back(p_char);
  313. }
  314. bool GDScriptTokenizerText::pop_paren(char32_t p_expected) {
  315. if (paren_stack.is_empty()) {
  316. return false;
  317. }
  318. char32_t actual = paren_stack.back()->get();
  319. paren_stack.pop_back();
  320. return actual == p_expected;
  321. }
  322. GDScriptTokenizer::Token GDScriptTokenizerText::pop_error() {
  323. Token error = error_stack.back()->get();
  324. error_stack.pop_back();
  325. return error;
  326. }
  327. GDScriptTokenizer::Token GDScriptTokenizerText::make_token(Token::Type p_type) {
  328. Token token(p_type);
  329. token.start_line = start_line;
  330. token.end_line = line;
  331. token.start_column = start_column;
  332. token.end_column = column;
  333. token.leftmost_column = leftmost_column;
  334. token.rightmost_column = rightmost_column;
  335. token.source = String(_start, _current - _start);
  336. if (p_type != Token::ERROR && cursor_line > -1) {
  337. // Also count whitespace after token.
  338. int offset = 0;
  339. while (_peek(offset) == ' ' || _peek(offset) == '\t') {
  340. offset++;
  341. }
  342. int last_column = column + offset;
  343. // Check cursor position in token.
  344. if (start_line == line) {
  345. // Single line token.
  346. if (cursor_line == start_line && cursor_column >= start_column && cursor_column <= last_column) {
  347. token.cursor_position = cursor_column - start_column;
  348. if (cursor_column == start_column) {
  349. token.cursor_place = CURSOR_BEGINNING;
  350. } else if (cursor_column < column) {
  351. token.cursor_place = CURSOR_MIDDLE;
  352. } else {
  353. token.cursor_place = CURSOR_END;
  354. }
  355. }
  356. } else {
  357. // Multi line token.
  358. if (cursor_line == start_line && cursor_column >= start_column) {
  359. // Is in first line.
  360. token.cursor_position = cursor_column - start_column;
  361. if (cursor_column == start_column) {
  362. token.cursor_place = CURSOR_BEGINNING;
  363. } else {
  364. token.cursor_place = CURSOR_MIDDLE;
  365. }
  366. } else if (cursor_line == line && cursor_column <= last_column) {
  367. // Is in last line.
  368. token.cursor_position = cursor_column - start_column;
  369. if (cursor_column < column) {
  370. token.cursor_place = CURSOR_MIDDLE;
  371. } else {
  372. token.cursor_place = CURSOR_END;
  373. }
  374. } else if (cursor_line > start_line && cursor_line < line) {
  375. // Is in middle line.
  376. token.cursor_position = CURSOR_MIDDLE;
  377. }
  378. }
  379. }
  380. last_token = token;
  381. return token;
  382. }
  383. GDScriptTokenizer::Token GDScriptTokenizerText::make_literal(const Variant &p_literal) {
  384. Token token = make_token(Token::LITERAL);
  385. token.literal = p_literal;
  386. return token;
  387. }
  388. GDScriptTokenizer::Token GDScriptTokenizerText::make_identifier(const StringName &p_identifier) {
  389. Token identifier = make_token(Token::IDENTIFIER);
  390. identifier.literal = p_identifier;
  391. return identifier;
  392. }
  393. GDScriptTokenizer::Token GDScriptTokenizerText::make_error(const String &p_message) {
  394. Token error = make_token(Token::ERROR);
  395. error.literal = p_message;
  396. return error;
  397. }
  398. void GDScriptTokenizerText::push_error(const String &p_message) {
  399. Token error = make_error(p_message);
  400. error_stack.push_back(error);
  401. }
  402. void GDScriptTokenizerText::push_error(const Token &p_error) {
  403. error_stack.push_back(p_error);
  404. }
  405. GDScriptTokenizer::Token GDScriptTokenizerText::make_paren_error(char32_t p_paren) {
  406. if (paren_stack.is_empty()) {
  407. return make_error(vformat("Closing \"%c\" doesn't have an opening counterpart.", p_paren));
  408. }
  409. Token error = make_error(vformat("Closing \"%c\" doesn't match the opening \"%c\".", p_paren, paren_stack.back()->get()));
  410. paren_stack.pop_back(); // Remove opening one anyway.
  411. return error;
  412. }
  413. GDScriptTokenizer::Token GDScriptTokenizerText::check_vcs_marker(char32_t p_test, Token::Type p_double_type) {
  414. const char32_t *next = _current + 1;
  415. int chars = 2; // Two already matched.
  416. // Test before consuming characters, since we don't want to consume more than needed.
  417. while (*next == p_test) {
  418. chars++;
  419. next++;
  420. }
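// A VCS conflict marker such as "<<<<<<<" is at least seven repeated characters.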
  421. if (chars >= 7) {
  422. // It is a VCS conflict marker.
  423. while (chars > 1) {
  424. // Consume all characters (first was already consumed by scan()).
  425. _advance();
  426. chars--;
  427. }
  428. return make_token(Token::VCS_CONFLICT_MARKER);
  429. } else {
  430. // It is only a regular double character token, so we consume the second character.
  431. _advance();
  432. return make_token(p_double_type);
  433. }
  434. }
  435. GDScriptTokenizer::Token GDScriptTokenizerText::annotation() {
  436. if (is_unicode_identifier_start(_peek())) {
  437. _advance(); // Consume start character.
  438. } else {
  439. push_error("Expected annotation identifier after \"@\".");
  440. }
  441. while (is_unicode_identifier_continue(_peek())) {
  442. // Consume all identifier characters.
  443. _advance();
  444. }
  445. Token annotation = make_token(Token::ANNOTATION);
  446. annotation.literal = StringName(annotation.source);
  447. return annotation;
  448. }
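// X-macro list of GDScript keywords, grouped by first character so that potential_identifier() can switch on the first character before comparing full strings.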
  449. #define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
  450. KEYWORD_GROUP('a') \
  451. KEYWORD("as", Token::AS) \
  452. KEYWORD("and", Token::AND) \
  453. KEYWORD("assert", Token::ASSERT) \
  454. KEYWORD("await", Token::AWAIT) \
  455. KEYWORD_GROUP('b') \
  456. KEYWORD("break", Token::BREAK) \
  457. KEYWORD("breakpoint", Token::BREAKPOINT) \
  458. KEYWORD_GROUP('c') \
  459. KEYWORD("class", Token::CLASS) \
  460. KEYWORD("class_name", Token::CLASS_NAME) \
  461. KEYWORD("const", Token::CONST) \
  462. KEYWORD("continue", Token::CONTINUE) \
  463. KEYWORD_GROUP('e') \
  464. KEYWORD("elif", Token::ELIF) \
  465. KEYWORD("else", Token::ELSE) \
  466. KEYWORD("enum", Token::ENUM) \
  467. KEYWORD("extends", Token::EXTENDS) \
  468. KEYWORD_GROUP('f') \
  469. KEYWORD("for", Token::FOR) \
  470. KEYWORD("func", Token::FUNC) \
  471. KEYWORD_GROUP('i') \
  472. KEYWORD("if", Token::IF) \
  473. KEYWORD("in", Token::IN) \
  474. KEYWORD("is", Token::IS) \
  475. KEYWORD_GROUP('m') \
  476. KEYWORD("match", Token::MATCH) \
  477. KEYWORD_GROUP('n') \
  478. KEYWORD("namespace", Token::NAMESPACE) \
  479. KEYWORD("not", Token::NOT) \
  480. KEYWORD_GROUP('o') \
  481. KEYWORD("or", Token::OR) \
  482. KEYWORD_GROUP('p') \
  483. KEYWORD("pass", Token::PASS) \
  484. KEYWORD("preload", Token::PRELOAD) \
  485. KEYWORD_GROUP('r') \
  486. KEYWORD("return", Token::RETURN) \
  487. KEYWORD_GROUP('s') \
  488. KEYWORD("self", Token::SELF) \
  489. KEYWORD("signal", Token::SIGNAL) \
  490. KEYWORD("static", Token::STATIC) \
  491. KEYWORD("super", Token::SUPER) \
  492. KEYWORD_GROUP('t') \
  493. KEYWORD("trait", Token::TRAIT) \
  494. KEYWORD_GROUP('v') \
  495. KEYWORD("var", Token::VAR) \
  496. KEYWORD("void", Token::VOID) \
  497. KEYWORD_GROUP('w') \
  498. KEYWORD("while", Token::WHILE) \
  499. KEYWORD("when", Token::WHEN) \
  500. KEYWORD_GROUP('y') \
  501. KEYWORD("yield", Token::YIELD) \
  502. KEYWORD_GROUP('I') \
  503. KEYWORD("INF", Token::CONST_INF) \
  504. KEYWORD_GROUP('N') \
  505. KEYWORD("NAN", Token::CONST_NAN) \
  506. KEYWORD_GROUP('P') \
  507. KEYWORD("PI", Token::CONST_PI) \
  508. KEYWORD_GROUP('T') \
  509. KEYWORD("TAU", Token::CONST_TAU)
  510. #define MIN_KEYWORD_LENGTH 2
  511. #define MAX_KEYWORD_LENGTH 10
  512. #ifdef DEBUG_ENABLED
  513. void GDScriptTokenizerText::make_keyword_list() {
  514. #define KEYWORD_LINE(keyword, token_type) keyword,
  515. #define KEYWORD_GROUP_IGNORE(group)
  516. keyword_list = {
  517. KEYWORDS(KEYWORD_GROUP_IGNORE, KEYWORD_LINE)
  518. };
  519. #undef KEYWORD_LINE
  520. #undef KEYWORD_GROUP_IGNORE
  521. }
  522. #endif // DEBUG_ENABLED
  523. GDScriptTokenizer::Token GDScriptTokenizerText::potential_identifier() {
  524. bool only_ascii = _peek(-1) < 128;
  525. // Consume all identifier characters.
  526. while (is_unicode_identifier_continue(_peek())) {
  527. char32_t c = _advance();
  528. only_ascii = only_ascii && c < 128;
  529. }
  530. int len = _current - _start;
  531. if (len == 1 && _peek(-1) == '_') {
  532. // Lone underscore.
  533. Token token = make_token(Token::UNDERSCORE);
  534. token.literal = "_";
  535. return token;
  536. }
  537. String name(_start, len);
  538. if (len < MIN_KEYWORD_LENGTH || len > MAX_KEYWORD_LENGTH) {
  539. // Cannot be a keyword, as the length doesn't match any.
  540. return make_identifier(name);
  541. }
  542. if (!only_ascii) {
  543. // Kept here in case the order with push_error matters.
  544. Token id = make_identifier(name);
  545. #ifdef DEBUG_ENABLED
  546. // Additional checks for identifiers but only in debug and if it's available in TextServer.
  547. if (TS->has_feature(TextServer::FEATURE_UNICODE_SECURITY)) {
  548. int64_t confusable = TS->is_confusable(name, keyword_list);
  549. if (confusable >= 0) {
  550. push_error(vformat(R"(Identifier "%s" is visually similar to the GDScript keyword "%s" and thus not allowed.)", name, keyword_list[confusable]));
  551. }
  552. }
  553. #endif // DEBUG_ENABLED
  554. // Cannot be a keyword, as keywords are ASCII only.
  555. return id;
  556. }
  557. // Define some helper macros for the switch case.
  558. #define KEYWORD_GROUP_CASE(char) \
  559. break; \
  560. case char:
  561. #define KEYWORD(keyword, token_type) \
  562. { \
  563. const int keyword_length = sizeof(keyword) - 1; \
  564. static_assert(keyword_length <= MAX_KEYWORD_LENGTH, "There's a keyword longer than the defined maximum length"); \
  565. static_assert(keyword_length >= MIN_KEYWORD_LENGTH, "There's a keyword shorter than the defined minimum length"); \
  566. if (keyword_length == len && name == keyword) { \
  567. Token kw = make_token(token_type); \
  568. kw.literal = name; \
  569. return kw; \
  570. } \
  571. }
  572. // Find if it's a keyword.
  573. switch (_start[0]) {
  574. default:
  575. KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
  576. break;
  577. }
  578. // Check if it's a special literal
  579. if (len == 4) {
  580. if (name == "true") {
  581. return make_literal(true);
  582. } else if (name == "null") {
  583. return make_literal(Variant());
  584. }
  585. } else if (len == 5) {
  586. if (name == "false") {
  587. return make_literal(false);
  588. }
  589. }
  590. // Not a keyword, so must be an identifier.
  591. return make_identifier(name);
  592. #undef KEYWORD_GROUP_CASE
  593. #undef KEYWORD
  594. }
  595. #undef MAX_KEYWORD_LENGTH
  596. #undef MIN_KEYWORD_LENGTH
  597. #undef KEYWORDS
  598. void GDScriptTokenizerText::newline(bool p_make_token) {
  599. // Don't overwrite a previous newline, and don't create one if this is a line continuation.
  600. if (p_make_token && !pending_newline && !line_continuation) {
  601. Token newline(Token::NEWLINE);
  602. newline.start_line = line;
  603. newline.end_line = line;
  604. newline.start_column = column - 1;
  605. newline.end_column = column;
  606. newline.leftmost_column = newline.start_column;
  607. newline.rightmost_column = newline.end_column;
  608. pending_newline = true;
  609. last_token = newline;
  610. last_newline = newline;
  611. }
  612. // Increment line/column counters.
  613. line++;
  614. column = 1;
  615. leftmost_column = 1;
  616. }
  617. GDScriptTokenizer::Token GDScriptTokenizerText::number() {
  618. int base = 10;
  619. bool has_decimal = false;
  620. bool has_exponent = false;
  621. bool has_error = false;
  622. bool need_digits = false;
  623. bool (*digit_check_func)(char32_t) = is_digit;
  624. // Sign before hexadecimal or binary.
  625. if ((_peek(-1) == '+' || _peek(-1) == '-') && _peek() == '0') {
  626. _advance();
  627. }
  628. if (_peek(-1) == '.') {
  629. has_decimal = true;
  630. } else if (_peek(-1) == '0') {
  631. if (_peek() == 'x') {
  632. // Hexadecimal.
  633. base = 16;
  634. digit_check_func = is_hex_digit;
  635. need_digits = true;
  636. _advance();
  637. } else if (_peek() == 'b') {
  638. // Binary.
  639. base = 2;
  640. digit_check_func = is_binary_digit;
  641. need_digits = true;
  642. _advance();
  643. }
  644. }
  645. if (base != 10 && is_underscore(_peek())) { // Disallow `0x_` and `0b_`.
  646. Token error = make_error(vformat(R"(Unexpected underscore after "0%c".)", _peek(-1)));
  647. error.start_column = column;
  648. error.leftmost_column = column;
  649. error.end_column = column + 1;
  650. error.rightmost_column = column + 1;
  651. push_error(error);
  652. has_error = true;
  653. }
  654. bool previous_was_underscore = false; // Allow `_` to be used in a number, for readability.
  655. while (digit_check_func(_peek()) || is_underscore(_peek())) {
  656. if (is_underscore(_peek())) {
  657. if (previous_was_underscore) {
  658. Token error = make_error(R"(Multiple underscores cannot be adjacent in a numeric literal.)");
  659. error.start_column = column;
  660. error.leftmost_column = column;
  661. error.end_column = column + 1;
  662. error.rightmost_column = column + 1;
  663. push_error(error);
  664. }
  665. previous_was_underscore = true;
  666. } else {
  667. need_digits = false;
  668. previous_was_underscore = false;
  669. }
  670. _advance();
  671. }
  672. // It might be a ".." token (rather than a decimal point), so check that it isn't.
  673. if (_peek() == '.' && _peek(1) != '.') {
  674. if (base == 10 && !has_decimal) {
  675. has_decimal = true;
  676. } else if (base == 10) {
  677. Token error = make_error("Cannot use a decimal point twice in a number.");
  678. error.start_column = column;
  679. error.leftmost_column = column;
  680. error.end_column = column + 1;
  681. error.rightmost_column = column + 1;
  682. push_error(error);
  683. has_error = true;
  684. } else if (base == 16) {
  685. Token error = make_error("Cannot use a decimal point in a hexadecimal number.");
  686. error.start_column = column;
  687. error.leftmost_column = column;
  688. error.end_column = column + 1;
  689. error.rightmost_column = column + 1;
  690. push_error(error);
  691. has_error = true;
  692. } else {
  693. Token error = make_error("Cannot use a decimal point in a binary number.");
  694. error.start_column = column;
  695. error.leftmost_column = column;
  696. error.end_column = column + 1;
  697. error.rightmost_column = column + 1;
  698. push_error(error);
  699. has_error = true;
  700. }
  701. if (!has_error) {
  702. _advance();
  703. // Consume decimal digits.
  704. if (is_underscore(_peek())) { // Disallow `10._`, but allow `10.`.
  705. Token error = make_error(R"(Unexpected underscore after decimal point.)");
  706. error.start_column = column;
  707. error.leftmost_column = column;
  708. error.end_column = column + 1;
  709. error.rightmost_column = column + 1;
  710. push_error(error);
  711. has_error = true;
  712. }
  713. previous_was_underscore = false;
  714. while (is_digit(_peek()) || is_underscore(_peek())) {
  715. if (is_underscore(_peek())) {
  716. if (previous_was_underscore) {
  717. Token error = make_error(R"(Multiple underscores cannot be adjacent in a numeric literal.)");
  718. error.start_column = column;
  719. error.leftmost_column = column;
  720. error.end_column = column + 1;
  721. error.rightmost_column = column + 1;
  722. push_error(error);
  723. }
  724. previous_was_underscore = true;
  725. } else {
  726. previous_was_underscore = false;
  727. }
  728. _advance();
  729. }
  730. }
  731. }
  732. if (base == 10) {
  733. if (_peek() == 'e' || _peek() == 'E') {
  734. has_exponent = true;
  735. _advance();
  736. if (_peek() == '+' || _peek() == '-') {
  737. // Exponent sign.
  738. _advance();
  739. }
  740. // Consume exponent digits.
  741. if (!is_digit(_peek())) {
  742. Token error = make_error(R"(Expected exponent value after "e".)");
  743. error.start_column = column;
  744. error.leftmost_column = column;
  745. error.end_column = column + 1;
  746. error.rightmost_column = column + 1;
  747. push_error(error);
  748. }
  749. previous_was_underscore = false;
  750. while (is_digit(_peek()) || is_underscore(_peek())) {
  751. if (is_underscore(_peek())) {
  752. if (previous_was_underscore) {
  753. Token error = make_error(R"(Multiple underscores cannot be adjacent in a numeric literal.)");
  754. error.start_column = column;
  755. error.leftmost_column = column;
  756. error.end_column = column + 1;
  757. error.rightmost_column = column + 1;
  758. push_error(error);
  759. }
  760. previous_was_underscore = true;
  761. } else {
  762. previous_was_underscore = false;
  763. }
  764. _advance();
  765. }
  766. }
  767. }
  768. if (need_digits) {
  769. // No digits in hex or bin literal.
  770. Token error = make_error(vformat(R"(Expected %s digit after "0%c".)", (base == 16 ? "hexadecimal" : "binary"), (base == 16 ? 'x' : 'b')));
  771. error.start_column = column;
  772. error.leftmost_column = column;
  773. error.end_column = column + 1;
  774. error.rightmost_column = column + 1;
  775. return error;
  776. }
  777. // Detect extra decimal point.
  778. if (!has_error && has_decimal && _peek() == '.' && _peek(1) != '.') {
  779. Token error = make_error("Cannot use a decimal point twice in a number.");
  780. error.start_column = column;
  781. error.leftmost_column = column;
  782. error.end_column = column + 1;
  783. error.rightmost_column = column + 1;
  784. push_error(error);
  785. has_error = true;
  786. } else if (is_unicode_identifier_start(_peek()) || is_unicode_identifier_continue(_peek())) {
  787. // Letter at the end of the number.
  788. push_error("Invalid numeric notation.");
  789. }
  790. // Create a string with the whole number.
  791. int len = _current - _start;
  792. String number = String(_start, len).replace("_", "");
  793. // Convert to the appropriate literal type.
  794. if (base == 16) {
  795. int64_t value = number.hex_to_int();
  796. return make_literal(value);
  797. } else if (base == 2) {
  798. int64_t value = number.bin_to_int();
  799. return make_literal(value);
  800. } else if (has_decimal || has_exponent) {
  801. double value = number.to_float();
  802. return make_literal(value);
  803. } else {
  804. int64_t value = number.to_int();
  805. return make_literal(value);
  806. }
  807. }
  808. GDScriptTokenizer::Token GDScriptTokenizerText::string() {
  809. enum StringType {
  810. STRING_REGULAR,
  811. STRING_NAME,
  812. STRING_NODEPATH,
  813. };
  814. bool is_raw = false;
  815. bool is_multiline = false;
  816. StringType type = STRING_REGULAR;
  817. if (_peek(-1) == 'r') {
  818. is_raw = true;
  819. _advance();
  820. } else if (_peek(-1) == '&') {
  821. type = STRING_NAME;
  822. _advance();
  823. } else if (_peek(-1) == '^') {
  824. type = STRING_NODEPATH;
  825. _advance();
  826. }
  827. char32_t quote_char = _peek(-1);
  828. if (_peek() == quote_char && _peek(1) == quote_char) {
  829. is_multiline = true;
  830. // Consume all quotes.
  831. _advance();
  832. _advance();
  833. }
  834. String result;
  835. char32_t prev = 0;
  836. int prev_pos = 0;
  837. for (;;) {
  838. // Consume actual string.
  839. if (_is_at_end()) {
  840. return make_error("Unterminated string.");
  841. }
  842. char32_t ch = _peek();
  843. if (ch == 0x200E || ch == 0x200F || (ch >= 0x202A && ch <= 0x202E) || (ch >= 0x2066 && ch <= 0x2069)) {
  844. Token error;
  845. if (is_raw) {
  846. error = make_error("Invisible text direction control character present in the string, use regular string literal instead of r-string.");
  847. } else {
  848. error = make_error("Invisible text direction control character present in the string, escape it (\"\\u" + String::num_int64(ch, 16) + "\") to avoid confusion.");
  849. }
  850. error.start_column = column;
  851. error.leftmost_column = error.start_column;
  852. error.end_column = column + 1;
  853. error.rightmost_column = error.end_column;
  854. push_error(error);
  855. }
  856. if (ch == '\\') {
  857. // Escape pattern.
  858. _advance();
  859. if (_is_at_end()) {
  860. return make_error("Unterminated string.");
  861. }
  862. if (is_raw) {
  863. if (_peek() == quote_char) {
  864. _advance();
  865. if (_is_at_end()) {
  866. return make_error("Unterminated string.");
  867. }
  868. result += '\\';
  869. result += quote_char;
  870. } else if (_peek() == '\\') { // For `\\\"`.
  871. _advance();
  872. if (_is_at_end()) {
  873. return make_error("Unterminated string.");
  874. }
  875. result += '\\';
  876. result += '\\';
  877. } else {
  878. result += '\\';
  879. }
  880. } else {
  881. // Grab escape character.
  882. char32_t code = _peek();
  883. _advance();
  884. if (_is_at_end()) {
  885. return make_error("Unterminated string.");
  886. }
  887. char32_t escaped = 0;
  888. bool valid_escape = true;
  889. switch (code) {
  890. case 'a':
  891. escaped = '\a';
  892. break;
  893. case 'b':
  894. escaped = '\b';
  895. break;
  896. case 'f':
  897. escaped = '\f';
  898. break;
  899. case 'n':
  900. escaped = '\n';
  901. break;
  902. case 'r':
  903. escaped = '\r';
  904. break;
  905. case 't':
  906. escaped = '\t';
  907. break;
  908. case 'v':
  909. escaped = '\v';
  910. break;
  911. case '\'':
  912. escaped = '\'';
  913. break;
  914. case '\"':
  915. escaped = '\"';
  916. break;
  917. case '\\':
  918. escaped = '\\';
  919. break;
  920. case 'U':
  921. case 'u': {
  922. // Hexadecimal sequence.
  923. int hex_len = (code == 'U') ? 6 : 4;
  924. for (int j = 0; j < hex_len; j++) {
  925. if (_is_at_end()) {
  926. return make_error("Unterminated string.");
  927. }
  928. char32_t digit = _peek();
  929. char32_t value = 0;
  930. if (is_digit(digit)) {
  931. value = digit - '0';
  932. } else if (digit >= 'a' && digit <= 'f') {
  933. value = digit - 'a';
  934. value += 10;
  935. } else if (digit >= 'A' && digit <= 'F') {
  936. value = digit - 'A';
  937. value += 10;
  938. } else {
  939. // Make error, but keep parsing the string.
  940. Token error = make_error("Invalid hexadecimal digit in unicode escape sequence.");
  941. error.start_column = column;
  942. error.leftmost_column = error.start_column;
  943. error.end_column = column + 1;
  944. error.rightmost_column = error.end_column;
  945. push_error(error);
  946. valid_escape = false;
  947. break;
  948. }
  949. escaped <<= 4;
  950. escaped |= value;
  951. _advance();
  952. }
  953. } break;
  954. case '\r':
  955. if (_peek() != '\n') {
  956. // Carriage return without newline in string. (???)
  957. // Just add it to the string and keep going.
  958. result += ch;
  959. _advance();
  960. break;
  961. }
  962. [[fallthrough]];
  963. case '\n':
  964. // Escaping newline.
  965. newline(false);
  966. valid_escape = false; // Don't add to the string.
  967. break;
  968. default:
  969. Token error = make_error("Invalid escape in string.");
  970. error.start_column = column - 2;
  971. error.leftmost_column = error.start_column;
  972. push_error(error);
  973. valid_escape = false;
  974. break;
  975. }
  976. // Parse UTF-16 pair.
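// Escapes in 0xD800-0xDBFF are UTF-16 lead surrogates and 0xDC00-0xDFFF are trail surrogates; a lead surrogate must be immediately followed by a trail surrogate.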
  977. if (valid_escape) {
  978. if ((escaped & 0xfffffc00) == 0xd800) {
  979. if (prev == 0) {
  980. prev = escaped;
  981. prev_pos = column - 2;
  982. continue;
  983. } else {
  984. Token error = make_error("Invalid UTF-16 sequence in string, unpaired lead surrogate.");
  985. error.start_column = column - 2;
  986. error.leftmost_column = error.start_column;
  987. push_error(error);
  988. valid_escape = false;
  989. prev = 0;
  990. }
  991. } else if ((escaped & 0xfffffc00) == 0xdc00) {
  992. if (prev == 0) {
  993. Token error = make_error("Invalid UTF-16 sequence in string, unpaired trail surrogate.");
  994. error.start_column = column - 2;
  995. error.leftmost_column = error.start_column;
  996. push_error(error);
  997. valid_escape = false;
  998. } else {
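// Combine the lead and trail surrogates into one code point: ((prev - 0xD800) << 10) + (escaped - 0xDC00) + 0x10000.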
  999. escaped = (prev << 10UL) + escaped - ((0xd800 << 10UL) + 0xdc00 - 0x10000);
  1000. prev = 0;
  1001. }
  1002. }
  1003. if (prev != 0) {
  1004. Token error = make_error("Invalid UTF-16 sequence in string, unpaired lead surrogate.");
  1005. error.start_column = prev_pos;
  1006. error.leftmost_column = error.start_column;
  1007. push_error(error);
  1008. prev = 0;
  1009. }
  1010. }
  1011. if (valid_escape) {
  1012. result += escaped;
  1013. }
  1014. }
  1015. } else if (ch == quote_char) {
  1016. if (prev != 0) {
  1017. Token error = make_error("Invalid UTF-16 sequence in string, unpaired lead surrogate.");
  1018. error.start_column = prev_pos;
  1019. error.leftmost_column = error.start_column;
  1020. push_error(error);
  1021. prev = 0;
  1022. }
  1023. _advance();
  1024. if (is_multiline) {
  1025. if (_peek() == quote_char && _peek(1) == quote_char) {
  1026. // Ended the multiline string. Consume all quotes.
  1027. _advance();
  1028. _advance();
  1029. break;
  1030. } else {
  1031. // Not a multiline string termination, add consumed quote.
  1032. result += quote_char;
  1033. }
  1034. } else {
  1035. // Ended single-line string.
  1036. break;
  1037. }
  1038. } else {
  1039. if (prev != 0) {
  1040. Token error = make_error("Invalid UTF-16 sequence in string, unpaired lead surrogate.");
  1041. error.start_column = prev_pos;
  1042. error.leftmost_column = error.start_column;
  1043. push_error(error);
  1044. prev = 0;
  1045. }
  1046. result += ch;
  1047. _advance();
  1048. if (ch == '\n') {
  1049. newline(false);
  1050. }
  1051. }
  1052. }
  1053. if (prev != 0) {
  1054. Token error = make_error("Invalid UTF-16 sequence in string, unpaired lead surrogate.");
  1055. error.start_column = prev_pos;
  1056. error.leftmost_column = error.start_column;
  1057. push_error(error);
  1058. prev = 0;
  1059. }
  1060. // Make the literal.
  1061. Variant string;
  1062. switch (type) {
  1063. case STRING_NAME:
  1064. string = StringName(result);
  1065. break;
  1066. case STRING_NODEPATH:
  1067. string = NodePath(result);
  1068. break;
  1069. case STRING_REGULAR:
  1070. string = result;
  1071. break;
  1072. }
  1073. return make_literal(string);
  1074. }
  1075. void GDScriptTokenizerText::check_indent() {
  1076. ERR_FAIL_COND_MSG(column != 1, "Checking tokenizer indentation in the middle of a line.");
  1077. if (_is_at_end()) {
  1078. // Send dedents for every indent level.
  1079. pending_indents -= indent_level();
  1080. indent_stack.clear();
  1081. return;
  1082. }
  1083. for (;;) {
  1084. char32_t current_indent_char = _peek();
  1085. int indent_count = 0;
  1086. if (current_indent_char != ' ' && current_indent_char != '\t' && current_indent_char != '\r' && current_indent_char != '\n' && current_indent_char != '#') {
  1087. // First character of the line is not whitespace, so we clear all indentation levels.
  1088. // Unless we are in a continuation or in multiline mode (inside expression).
  1089. if (line_continuation || multiline_mode) {
  1090. return;
  1091. }
  1092. pending_indents -= indent_level();
  1093. indent_stack.clear();
  1094. return;
  1095. }
  1096. if (_peek() == '\r') {
  1097. _advance();
  1098. if (_peek() != '\n') {
  1099. push_error("Stray carriage return character in source code.");
  1100. }
  1101. }
  1102. if (_peek() == '\n') {
  1103. // Empty line, keep going.
  1104. _advance();
  1105. newline(false);
  1106. continue;
  1107. }
  1108. // Check indent level.
  1109. bool mixed = false;
  1110. while (!_is_at_end()) {
  1111. char32_t space = _peek();
  1112. if (space == '\t') {
  1113. // Consider individual tab columns.
  1114. column += tab_size - 1;
  1115. indent_count += tab_size;
  1116. } else if (space == ' ') {
  1117. indent_count += 1;
  1118. } else {
  1119. break;
  1120. }
  1121. mixed = mixed || space != current_indent_char;
  1122. _advance();
  1123. }
  1124. if (_is_at_end()) {
  1125. // Reached the end with an empty line, so just dedent as much as needed.
  1126. pending_indents -= indent_level();
  1127. indent_stack.clear();
  1128. return;
  1129. }
  1130. if (_peek() == '\r') {
  1131. _advance();
  1132. if (_peek() != '\n') {
  1133. push_error("Stray carriage return character in source code.");
  1134. }
  1135. }
  1136. if (_peek() == '\n') {
  1137. // Empty line, keep going.
  1138. _advance();
  1139. newline(false);
  1140. continue;
  1141. }
  1142. if (_peek() == '#') {
  1143. // Comment. Advance to the next line.
  1144. #ifdef TOOLS_ENABLED
  1145. String comment;
  1146. while (_peek() != '\n' && !_is_at_end()) {
  1147. comment += _advance();
  1148. }
  1149. comments[line] = CommentData(comment, true);
  1150. #else
  1151. while (_peek() != '\n' && !_is_at_end()) {
  1152. _advance();
  1153. }
  1154. #endif // TOOLS_ENABLED
  1155. if (_is_at_end()) {
  1156. // Reached the end with an empty line, so just dedent as much as needed.
  1157. pending_indents -= indent_level();
  1158. indent_stack.clear();
  1159. return;
  1160. }
  1161. _advance(); // Consume '\n'.
  1162. newline(false);
  1163. continue;
  1164. }
  1165. if (mixed && !line_continuation && !multiline_mode) {
  1166. Token error = make_error("Mixed use of tabs and spaces for indentation.");
  1167. error.start_line = line;
  1168. error.start_column = 1;
  1169. error.leftmost_column = 1;
  1170. error.rightmost_column = column;
  1171. push_error(error);
  1172. }
  1173. if (line_continuation || multiline_mode) {
  1174. // We cleared up all the whitespace at the beginning of the line.
  1175. // If this is a line continuation or we're in multiline mode then we don't want any indentation changes.
  1176. return;
  1177. }
  1178. // Check if indentation character is consistent.
  1179. if (indent_char == '\0') {
  1180. // First time indenting, choose character now.
  1181. indent_char = current_indent_char;
  1182. } else if (current_indent_char != indent_char) {
  1183. Token error = make_error(vformat("Used %s character for indentation instead of %s as used before in the file.",
  1184. _get_indent_char_name(current_indent_char), _get_indent_char_name(indent_char)));
  1185. error.start_line = line;
  1186. error.start_column = 1;
  1187. error.leftmost_column = 1;
  1188. error.rightmost_column = column;
  1189. push_error(error);
  1190. }
  1191. // Now we can do actual indentation changes.
  1192. // Check if indent or dedent.
  1193. int previous_indent = 0;
  1194. if (indent_level() > 0) {
  1195. previous_indent = indent_stack.back()->get();
  1196. }
  1197. if (indent_count == previous_indent) {
  1198. // No change in indentation.
  1199. return;
  1200. }
  1201. if (indent_count > previous_indent) {
  1202. // Indentation increased.
  1203. indent_stack.push_back(indent_count);
  1204. pending_indents++;
  1205. } else {
  1206. // Indentation decreased (dedent).
  1207. if (indent_level() == 0) {
  1208. push_error("Tokenizer bug: trying to dedent without previous indent.");
  1209. return;
  1210. }
  1211. while (indent_level() > 0 && indent_stack.back()->get() > indent_count) {
  1212. indent_stack.pop_back();
  1213. pending_indents--;
  1214. }
  1215. if ((indent_level() > 0 && indent_stack.back()->get() != indent_count) || (indent_level() == 0 && indent_count != 0)) {
  1216. // Mismatched indentation alignment.
  1217. Token error = make_error("Unindent doesn't match the previous indentation level.");
  1218. error.start_line = line;
  1219. error.start_column = 1;
  1220. error.leftmost_column = 1;
  1221. error.end_column = column + 1;
  1222. error.rightmost_column = column + 1;
  1223. push_error(error);
  1224. // Still, we'll be lenient and keep going, so keep this level in the stack.
  1225. indent_stack.push_back(indent_count);
  1226. }
  1227. }
  1228. break; // Get out of the loop in any case.
  1229. }
  1230. }
  1231. String GDScriptTokenizerText::_get_indent_char_name(char32_t ch) {
  1232. ERR_FAIL_COND_V(ch != ' ' && ch != '\t', String(&ch, 1).c_escape());
  1233. return ch == ' ' ? "space" : "tab";
  1234. }
  1235. void GDScriptTokenizerText::_skip_whitespace() {
  1236. if (pending_indents != 0) {
  1237. // Still have some indent/dedent tokens to give.
  1238. return;
  1239. }
  1240. bool is_bol = column == 1; // Beginning of line.
  1241. if (is_bol) {
  1242. check_indent();
  1243. return;
  1244. }
  1245. for (;;) {
  1246. char32_t c = _peek();
  1247. switch (c) {
  1248. case ' ':
  1249. _advance();
  1250. break;
  1251. case '\t':
  1252. _advance();
  1253. // Consider individual tab columns.
  1254. column += tab_size - 1;
  1255. break;
  1256. case '\r':
  1257. _advance(); // Consume either way.
  1258. if (_peek() != '\n') {
  1259. push_error("Stray carriage return character in source code.");
  1260. return;
  1261. }
  1262. break;
  1263. case '\n':
  1264. _advance();
  1265. newline(!is_bol); // Don't create new line token if line is empty.
  1266. check_indent();
  1267. break;
  1268. case '#': {
  1269. // Comment.
  1270. #ifdef TOOLS_ENABLED
  1271. String comment;
  1272. while (_peek() != '\n' && !_is_at_end()) {
  1273. comment += _advance();
  1274. }
  1275. comments[line] = CommentData(comment, is_bol);
  1276. #else
  1277. while (_peek() != '\n' && !_is_at_end()) {
  1278. _advance();
  1279. }
  1280. #endif // TOOLS_ENABLED
  1281. if (_is_at_end()) {
  1282. return;
  1283. }
  1284. _advance(); // Consume '\n'
  1285. newline(!is_bol);
  1286. check_indent();
  1287. } break;
  1288. default:
  1289. return;
  1290. }
  1291. }
  1292. }
  1293. GDScriptTokenizer::Token GDScriptTokenizerText::scan() {
  1294. if (has_error()) {
  1295. return pop_error();
  1296. }
  1297. _skip_whitespace();
  1298. if (pending_newline) {
  1299. pending_newline = false;
  1300. if (!multiline_mode) {
  1301. // Don't return newline tokens on multiline mode.
  1302. return last_newline;
  1303. }
  1304. }
  1305. // Check for potential errors raised by _skip_whitespace().
  1306. if (has_error()) {
  1307. return pop_error();
  1308. }
  1309. _start = _current;
  1310. start_line = line;
  1311. start_column = column;
  1312. leftmost_column = column;
  1313. rightmost_column = column;
  1314. if (pending_indents != 0) {
  1315. // Adjust position for indent.
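// Move the token start back to the first column so the INDENT/DEDENT token covers the line's leading whitespace.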
  1316. _start -= start_column - 1;
  1317. start_column = 1;
  1318. leftmost_column = 1;
  1319. if (pending_indents > 0) {
  1320. // Indents.
  1321. pending_indents--;
  1322. return make_token(Token::INDENT);
  1323. } else {
  1324. // Dedents.
  1325. pending_indents++;
  1326. Token dedent = make_token(Token::DEDENT);
  1327. dedent.end_column += 1;
  1328. dedent.rightmost_column += 1;
  1329. return dedent;
  1330. }
  1331. }
  1332. if (_is_at_end()) {
  1333. return make_token(Token::TK_EOF);
  1334. }
  1335. const char32_t c = _advance();
  1336. if (c == '\\') {
  1337. // Line continuation with backslash.
  1338. if (_peek() == '\r') {
  1339. if (_peek(1) != '\n') {
  1340. return make_error("Unexpected carriage return character.");
  1341. }
  1342. _advance();
  1343. }
  1344. if (_peek() != '\n') {
  1345. return make_error("Expected new line after \"\\\".");
  1346. }
  1347. _advance();
  1348. newline(false);
  1349. line_continuation = true;
  1350. _skip_whitespace(); // Skip whitespace/comment lines after `\`. See GH-89403.
  1351. continuation_lines.push_back(line);
  1352. return scan(); // Recurse to get next token.
  1353. }
  1354. line_continuation = false;
  1355. if (is_digit(c)) {
  1356. return number();
  1357. } else if (c == 'r' && (_peek() == '"' || _peek() == '\'')) {
  1358. // Raw string literals.
  1359. return string();
  1360. } else if (is_unicode_identifier_start(c)) {
  1361. return potential_identifier();
  1362. }
  1363. switch (c) {
  1364. // String literals.
  1365. case '"':
  1366. case '\'':
  1367. return string();
  1368. // Annotation.
  1369. case '@':
  1370. return annotation();
  1371. // Single characters.
  1372. case '~':
  1373. return make_token(Token::TILDE);
  1374. case ',':
  1375. return make_token(Token::COMMA);
  1376. case ':':
  1377. return make_token(Token::COLON);
  1378. case ';':
  1379. return make_token(Token::SEMICOLON);
  1380. case '$':
  1381. return make_token(Token::DOLLAR);
  1382. case '?':
  1383. return make_token(Token::QUESTION_MARK);
  1384. case '`':
  1385. return make_token(Token::BACKTICK);
  1386. // Parens.
  1387. case '(':
  1388. push_paren('(');
  1389. return make_token(Token::PARENTHESIS_OPEN);
  1390. case '[':
  1391. push_paren('[');
  1392. return make_token(Token::BRACKET_OPEN);
  1393. case '{':
  1394. push_paren('{');
  1395. return make_token(Token::BRACE_OPEN);
  1396. case ')':
  1397. if (!pop_paren('(')) {
  1398. return make_paren_error(c);
  1399. }
  1400. return make_token(Token::PARENTHESIS_CLOSE);
  1401. case ']':
  1402. if (!pop_paren('[')) {
  1403. return make_paren_error(c);
  1404. }
  1405. return make_token(Token::BRACKET_CLOSE);
  1406. case '}':
  1407. if (!pop_paren('{')) {
  1408. return make_paren_error(c);
  1409. }
  1410. return make_token(Token::BRACE_CLOSE);
  1411. // Double characters.
  1412. case '!':
  1413. if (_peek() == '=') {
  1414. _advance();
  1415. return make_token(Token::BANG_EQUAL);
  1416. } else {
  1417. return make_token(Token::BANG);
  1418. }
  1419. case '.':
  1420. if (_peek() == '.') {
  1421. _advance();
  1422. return make_token(Token::PERIOD_PERIOD);
  1423. } else if (is_digit(_peek())) {
  1424. // Number starting with '.'.
  1425. return number();
  1426. } else {
  1427. return make_token(Token::PERIOD);
  1428. }
  1429. case '+':
  1430. if (_peek() == '=') {
  1431. _advance();
  1432. return make_token(Token::PLUS_EQUAL);
  1433. } else if (is_digit(_peek()) && !last_token.can_precede_bin_op()) {
  1434. // Number starting with '+'.
  1435. return number();
  1436. } else {
  1437. return make_token(Token::PLUS);
  1438. }
  1439. case '-':
  1440. if (_peek() == '=') {
  1441. _advance();
  1442. return make_token(Token::MINUS_EQUAL);
  1443. } else if (is_digit(_peek()) && !last_token.can_precede_bin_op()) {
  1444. // Number starting with '-'.
  1445. return number();
  1446. } else if (_peek() == '>') {
  1447. _advance();
  1448. return make_token(Token::FORWARD_ARROW);
  1449. } else {
  1450. return make_token(Token::MINUS);
  1451. }
  1452. case '*':
  1453. if (_peek() == '=') {
  1454. _advance();
  1455. return make_token(Token::STAR_EQUAL);
  1456. } else if (_peek() == '*') {
  1457. if (_peek(1) == '=') {
  1458. _advance();
  1459. _advance(); // Advance both '*' and '='
  1460. return make_token(Token::STAR_STAR_EQUAL);
  1461. }
  1462. _advance();
  1463. return make_token(Token::STAR_STAR);
  1464. } else {
  1465. return make_token(Token::STAR);
  1466. }
  1467. case '/':
  1468. if (_peek() == '=') {
  1469. _advance();
  1470. return make_token(Token::SLASH_EQUAL);
  1471. } else {
  1472. return make_token(Token::SLASH);
  1473. }
  1474. case '%':
  1475. if (_peek() == '=') {
  1476. _advance();
  1477. return make_token(Token::PERCENT_EQUAL);
  1478. } else {
  1479. return make_token(Token::PERCENT);
  1480. }
  1481. case '^':
  1482. if (_peek() == '=') {
  1483. _advance();
  1484. return make_token(Token::CARET_EQUAL);
  1485. } else if (_peek() == '"' || _peek() == '\'') {
  1486. // Node path
  1487. return string();
  1488. } else {
  1489. return make_token(Token::CARET);
  1490. }
  1491. case '&':
  1492. if (_peek() == '&') {
  1493. _advance();
  1494. return make_token(Token::AMPERSAND_AMPERSAND);
  1495. } else if (_peek() == '=') {
  1496. _advance();
  1497. return make_token(Token::AMPERSAND_EQUAL);
  1498. } else if (_peek() == '"' || _peek() == '\'') {
  1499. // String Name
  1500. return string();
  1501. } else {
  1502. return make_token(Token::AMPERSAND);
  1503. }
  1504. case '|':
  1505. if (_peek() == '|') {
  1506. _advance();
  1507. return make_token(Token::PIPE_PIPE);
  1508. } else if (_peek() == '=') {
  1509. _advance();
  1510. return make_token(Token::PIPE_EQUAL);
  1511. } else {
  1512. return make_token(Token::PIPE);
  1513. }
  1514. // Potential VCS conflict markers.
  1515. case '=':
  1516. if (_peek() == '=') {
  1517. return check_vcs_marker('=', Token::EQUAL_EQUAL);
  1518. } else {
  1519. return make_token(Token::EQUAL);
  1520. }
  1521. case '<':
  1522. if (_peek() == '=') {
  1523. _advance();
  1524. return make_token(Token::LESS_EQUAL);
  1525. } else if (_peek() == '<') {
  1526. if (_peek(1) == '=') {
  1527. _advance();
  1528. _advance(); // Advance both '<' and '='
  1529. return make_token(Token::LESS_LESS_EQUAL);
  1530. } else {
  1531. return check_vcs_marker('<', Token::LESS_LESS);
  1532. }
  1533. } else {
  1534. return make_token(Token::LESS);
  1535. }
  1536. case '>':
  1537. if (_peek() == '=') {
  1538. _advance();
  1539. return make_token(Token::GREATER_EQUAL);
  1540. } else if (_peek() == '>') {
  1541. if (_peek(1) == '=') {
  1542. _advance();
  1543. _advance(); // Advance both '>' and '='
  1544. return make_token(Token::GREATER_GREATER_EQUAL);
  1545. } else {
  1546. return check_vcs_marker('>', Token::GREATER_GREATER);
  1547. }
  1548. } else {
  1549. return make_token(Token::GREATER);
  1550. }
  1551. default:
  1552. if (is_whitespace(c)) {
  1553. return make_error(vformat(R"(Invalid white space character U+%04X.)", static_cast<int32_t>(c)));
  1554. } else {
  1555. return make_error(vformat(R"(Invalid character "%c" (U+%04X).)", c, static_cast<int32_t>(c)));
  1556. }
  1557. }
  1558. }
  1559. GDScriptTokenizerText::GDScriptTokenizerText() {
  1560. #ifdef TOOLS_ENABLED
  1561. if (EditorSettings::get_singleton()) {
  1562. tab_size = EditorSettings::get_singleton()->get_setting("text_editor/behavior/indent/size");
  1563. }
  1564. #endif // TOOLS_ENABLED
  1565. #ifdef DEBUG_ENABLED
  1566. make_keyword_list();
  1567. #endif // DEBUG_ENABLED
  1568. }