/*************************************************************************/
/*  gdscript_tokenizer.cpp                                               */
/*************************************************************************/
/*                       This file is part of:                           */
/*                           GODOT ENGINE                                */
/*                      https://godotengine.org                          */
/*************************************************************************/
/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur.                 */
/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md).   */
/*                                                                       */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the       */
/* "Software"), to deal in the Software without restriction, including   */
/* without limitation the rights to use, copy, modify, merge, publish,   */
/* distribute, sublicense, and/or sell copies of the Software, and to    */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions:                                             */
/*                                                                       */
/* The above copyright notice and this permission notice shall be        */
/* included in all copies or substantial portions of the Software.       */
/*                                                                       */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,       */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF    */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY  */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,  */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE     */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                */
/*************************************************************************/

#include "gdscript_tokenizer.h"

#include "core/error_macros.h"

#ifdef TOOLS_ENABLED
#include "editor/editor_settings.h"
#endif

static const char *token_names[] = {
	"Empty", // EMPTY,
	// Basic
	"Annotation", // ANNOTATION
	"Identifier", // IDENTIFIER,
	"Literal", // LITERAL,
	// Comparison
	"<", // LESS,
	"<=", // LESS_EQUAL,
	">", // GREATER,
	">=", // GREATER_EQUAL,
	"==", // EQUAL_EQUAL,
	"!=", // BANG_EQUAL,
	// Logical
	"and", // AND,
	"or", // OR,
	"not", // NOT,
	"&&", // AMPERSAND_AMPERSAND,
	"||", // PIPE_PIPE,
	"!", // BANG,
	// Bitwise
	"&", // AMPERSAND,
	"|", // PIPE,
	"~", // TILDE,
	"^", // CARET,
	"<<", // LESS_LESS,
	">>", // GREATER_GREATER,
	// Math
	"+", // PLUS,
	"-", // MINUS,
	"*", // STAR,
	"/", // SLASH,
	"%", // PERCENT,
	// Assignment
	"=", // EQUAL,
	"+=", // PLUS_EQUAL,
	"-=", // MINUS_EQUAL,
	"*=", // STAR_EQUAL,
	"/=", // SLASH_EQUAL,
	"%=", // PERCENT_EQUAL,
	"<<=", // LESS_LESS_EQUAL,
	">>=", // GREATER_GREATER_EQUAL,
	"&=", // AMPERSAND_EQUAL,
	"|=", // PIPE_EQUAL,
	"^=", // CARET_EQUAL,
	// Control flow
	"if", // IF,
	"elif", // ELIF,
	"else", // ELSE,
	"for", // FOR,
	"while", // WHILE,
	"break", // BREAK,
	"continue", // CONTINUE,
	"pass", // PASS,
	"return", // RETURN,
	"match", // MATCH,
	// Keywords
	"as", // AS,
	"assert", // ASSERT,
	"await", // AWAIT,
	"breakpoint", // BREAKPOINT,
	"class", // CLASS,
	"class_name", // CLASS_NAME,
	"const", // CONST,
	"enum", // ENUM,
	"extends", // EXTENDS,
	"func", // FUNC,
	"in", // IN,
	"is", // IS,
	"namespace", // NAMESPACE
	"preload", // PRELOAD,
	"self", // SELF,
	"signal", // SIGNAL,
	"static", // STATIC,
	"super", // SUPER,
	"trait", // TRAIT,
	"var", // VAR,
	"void", // VOID,
	"yield", // YIELD,
	// Punctuation
	"[", // BRACKET_OPEN,
	"]", // BRACKET_CLOSE,
	"{", // BRACE_OPEN,
	"}", // BRACE_CLOSE,
	"(", // PARENTHESIS_OPEN,
	")", // PARENTHESIS_CLOSE,
	",", // COMMA,
	";", // SEMICOLON,
	".", // PERIOD,
	"..", // PERIOD_PERIOD,
	":", // COLON,
	"$", // DOLLAR,
	"->", // FORWARD_ARROW,
	"_", // UNDERSCORE,
	// Whitespace
	"Newline", // NEWLINE,
	"Indent", // INDENT,
	"Dedent", // DEDENT,
	// Constants
	"PI", // CONST_PI,
	"TAU", // CONST_TAU,
	"INF", // CONST_INF,
	"NaN", // CONST_NAN,
	// Error message improvement
	"VCS conflict marker", // VCS_CONFLICT_MARKER,
	"`", // BACKTICK,
	"?", // QUESTION_MARK,
	// Special
	"Error", // ERROR,
	"End of file", // EOF,
};

// Avoid desync.
static_assert(sizeof(token_names) / sizeof(token_names[0]) == GDScriptTokenizer::Token::TK_MAX, "Amount of token names doesn't match the amount of token types.");

const char *GDScriptTokenizer::Token::get_name() const {
	ERR_FAIL_INDEX_V_MSG(type, TK_MAX, "<error>", "Using token type out of the enum.");
	return token_names[type];
}

String GDScriptTokenizer::get_token_name(Token::Type p_token_type) {
	ERR_FAIL_INDEX_V_MSG(p_token_type, Token::TK_MAX, "<error>", "Using token type out of the enum.");
	return token_names[p_token_type];
}

void GDScriptTokenizer::set_source_code(const String &p_source_code) {
	source = p_source_code;
	if (source.empty()) {
		_source = L"";
	} else {
		_source = source.ptr();
	}
	_current = _source;
	line = 1;
	column = 1;
	length = p_source_code.length();
	position = 0;
}

void GDScriptTokenizer::set_cursor_position(int p_line, int p_column) {
	cursor_line = p_line;
	cursor_column = p_column;
}

void GDScriptTokenizer::set_multiline_mode(bool p_state) {
	multiline_mode = p_state;
}

int GDScriptTokenizer::get_cursor_line() const {
	return cursor_line;
}

int GDScriptTokenizer::get_cursor_column() const {
	return cursor_column;
}

bool GDScriptTokenizer::is_past_cursor() const {
	if (line < cursor_line) {
		return false;
	}
	if (line > cursor_line) {
		return true;
	}
	if (column < cursor_column) {
		return false;
	}
	return true;
}

CharType GDScriptTokenizer::_advance() {
	if (unlikely(_is_at_end())) {
		return '\0';
	}
	_current++;
	column++;
	position++;
	if (column > rightmost_column) {
		rightmost_column = column;
	}
	if (unlikely(_is_at_end())) {
		// Add extra newline even if it's not there, to satisfy the parser.
		newline(true);
		// Also add needed unindent.
		check_indent();
	}
	return _peek(-1);
}

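// Note (added summary, not upstream documentation): _advance() consumes one
// character and returns it (via _peek(-1)); when it runs off the end of the
// source it synthesizes a trailing newline and the matching dedents, so the
// parser always sees a well-terminated token stream.
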
void GDScriptTokenizer::push_paren(CharType p_char) {
	paren_stack.push_back(p_char);
}

bool GDScriptTokenizer::pop_paren(CharType p_expected) {
	if (paren_stack.empty()) {
		return false;
	}
	CharType actual = paren_stack.back()->get();
	paren_stack.pop_back();
	return actual == p_expected;
}

GDScriptTokenizer::Token GDScriptTokenizer::pop_error() {
	Token error = error_stack.back()->get();
	error_stack.pop_back();
	return error;
}

static bool _is_alphanumeric(CharType c) {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_';
}

static bool _is_digit(CharType c) {
	return (c >= '0' && c <= '9');
}

static bool _is_hex_digit(CharType c) {
	return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F');
}

static bool _is_binary_digit(CharType c) {
	return (c == '0' || c == '1');
}

GDScriptTokenizer::Token GDScriptTokenizer::make_token(Token::Type p_type) {
	Token token(p_type);
	token.start_line = start_line;
	token.end_line = line;
	token.start_column = start_column;
	token.end_column = column;
	token.leftmost_column = leftmost_column;
	token.rightmost_column = rightmost_column;
	token.source = String(_start, _current - _start);

	if (p_type != Token::ERROR && cursor_line > -1) {
		// Also count whitespace after token.
		int offset = 0;
		while (_peek(offset) == ' ' || _peek(offset) == '\t') {
			offset++;
		}
		int last_column = column + offset;
		// Check cursor position in token.
		if (start_line == line) {
			// Single line token.
			if (cursor_line == start_line && cursor_column >= start_column && cursor_column <= last_column) {
				token.cursor_position = cursor_column - start_column;
				if (cursor_column == start_column) {
					token.cursor_place = CURSOR_BEGINNING;
				} else if (cursor_column < column) {
					token.cursor_place = CURSOR_MIDDLE;
				} else {
					token.cursor_place = CURSOR_END;
				}
			}
		} else {
			// Multi line token.
			if (cursor_line == start_line && cursor_column >= start_column) {
				// Is in first line.
				token.cursor_position = cursor_column - start_column;
				if (cursor_column == start_column) {
					token.cursor_place = CURSOR_BEGINNING;
				} else {
					token.cursor_place = CURSOR_MIDDLE;
				}
			} else if (cursor_line == line && cursor_column <= last_column) {
				// Is in last line.
				token.cursor_position = cursor_column - start_column;
				if (cursor_column < column) {
					token.cursor_place = CURSOR_MIDDLE;
				} else {
					token.cursor_place = CURSOR_END;
				}
			} else if (cursor_line > start_line && cursor_line < line) {
				// Is in middle line.
				token.cursor_place = CURSOR_MIDDLE;
			}
		}
	}

	return token;
}

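// Note on the cursor_* fields above (added summary): set_cursor_position() lets
// a caller mark a line/column of interest, and make_token() records where that
// position falls inside each produced token (beginning, middle, or end). This
// is presumably consumed by editor features such as code completion; nothing in
// this file depends on it beyond filling the fields in.
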
GDScriptTokenizer::Token GDScriptTokenizer::make_literal(const Variant &p_literal) {
	Token token = make_token(Token::LITERAL);
	token.literal = p_literal;
	return token;
}

GDScriptTokenizer::Token GDScriptTokenizer::make_identifier(const StringName &p_identifier) {
	Token identifier = make_token(Token::IDENTIFIER);
	identifier.literal = p_identifier;
	return identifier;
}

GDScriptTokenizer::Token GDScriptTokenizer::make_error(const String &p_message) {
	Token error = make_token(Token::ERROR);
	error.literal = p_message;
	return error;
}

void GDScriptTokenizer::push_error(const String &p_message) {
	Token error = make_error(p_message);
	error_stack.push_back(error);
}

void GDScriptTokenizer::push_error(const Token &p_error) {
	error_stack.push_back(p_error);
}

GDScriptTokenizer::Token GDScriptTokenizer::make_paren_error(CharType p_paren) {
	if (paren_stack.empty()) {
		return make_error(vformat("Closing \"%c\" doesn't have an opening counterpart.", p_paren));
	}
	Token error = make_error(vformat("Closing \"%c\" doesn't match the opening \"%c\".", p_paren, paren_stack.back()->get()));
	paren_stack.pop_back(); // Remove opening one anyway.
	return error;
}

GDScriptTokenizer::Token GDScriptTokenizer::check_vcs_marker(CharType p_test, Token::Type p_double_type) {
	const CharType *next = _current + 1;
	int chars = 2; // Two already matched.

	// Test before consuming characters, since we don't want to consume more than needed.
	while (*next == p_test) {
		chars++;
		next++;
	}
	if (chars >= 7) {
		// It is a VCS conflict marker.
		while (chars > 1) {
			// Consume all characters (first was already consumed by scan()).
			_advance();
			chars--;
		}
		return make_token(Token::VCS_CONFLICT_MARKER);
	} else {
		// It is only a regular double character token, so we consume the second character.
		_advance();
		return make_token(p_double_type);
	}
}

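// Added note: a run of seven or more '<', '=' or '>' characters (e.g. the
// "<<<<<<<" left behind by a merge) is reported as a VCS conflict marker so
// the user gets a clearer message than a generic syntax error; see the '=',
// '<' and '>' cases in scan() below, which call into this function.
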
GDScriptTokenizer::Token GDScriptTokenizer::annotation() {
	if (!_is_alphanumeric(_peek())) {
		push_error("Expected annotation identifier after \"@\".");
	}
	while (_is_alphanumeric(_peek())) {
		// Consume all identifier characters.
		_advance();
	}
	Token annotation = make_token(Token::ANNOTATION);
	annotation.literal = StringName(annotation.source);
	return annotation;
}

GDScriptTokenizer::Token GDScriptTokenizer::potential_identifier() {
#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
	KEYWORD_GROUP('a') \
	KEYWORD("as", Token::AS) \
	KEYWORD("and", Token::AND) \
	KEYWORD("assert", Token::ASSERT) \
	KEYWORD("await", Token::AWAIT) \
	KEYWORD_GROUP('b') \
	KEYWORD("break", Token::BREAK) \
	KEYWORD("breakpoint", Token::BREAKPOINT) \
	KEYWORD_GROUP('c') \
	KEYWORD("class", Token::CLASS) \
	KEYWORD("class_name", Token::CLASS_NAME) \
	KEYWORD("const", Token::CONST) \
	KEYWORD("continue", Token::CONTINUE) \
	KEYWORD_GROUP('e') \
	KEYWORD("elif", Token::ELIF) \
	KEYWORD("else", Token::ELSE) \
	KEYWORD("enum", Token::ENUM) \
	KEYWORD("extends", Token::EXTENDS) \
	KEYWORD_GROUP('f') \
	KEYWORD("for", Token::FOR) \
	KEYWORD("func", Token::FUNC) \
	KEYWORD_GROUP('i') \
	KEYWORD("if", Token::IF) \
	KEYWORD("in", Token::IN) \
	KEYWORD("is", Token::IS) \
	KEYWORD_GROUP('m') \
	KEYWORD("match", Token::MATCH) \
	KEYWORD_GROUP('n') \
	KEYWORD("namespace", Token::NAMESPACE) \
	KEYWORD("not", Token::NOT) \
	KEYWORD_GROUP('o') \
	KEYWORD("or", Token::OR) \
	KEYWORD_GROUP('p') \
	KEYWORD("pass", Token::PASS) \
	KEYWORD("preload", Token::PRELOAD) \
	KEYWORD_GROUP('r') \
	KEYWORD("return", Token::RETURN) \
	KEYWORD_GROUP('s') \
	KEYWORD("self", Token::SELF) \
	KEYWORD("signal", Token::SIGNAL) \
	KEYWORD("static", Token::STATIC) \
	KEYWORD("super", Token::SUPER) \
	KEYWORD_GROUP('t') \
	KEYWORD("trait", Token::TRAIT) \
	KEYWORD_GROUP('v') \
	KEYWORD("var", Token::VAR) \
	KEYWORD("void", Token::VOID) \
	KEYWORD_GROUP('w') \
	KEYWORD("while", Token::WHILE) \
	KEYWORD_GROUP('y') \
	KEYWORD("yield", Token::YIELD) \
	KEYWORD_GROUP('I') \
	KEYWORD("INF", Token::CONST_INF) \
	KEYWORD_GROUP('N') \
	KEYWORD("NAN", Token::CONST_NAN) \
	KEYWORD_GROUP('P') \
	KEYWORD("PI", Token::CONST_PI) \
	KEYWORD_GROUP('T') \
	KEYWORD("TAU", Token::CONST_TAU)

#define MIN_KEYWORD_LENGTH 2
#define MAX_KEYWORD_LENGTH 10

	// Consume all alphanumeric characters.
	while (_is_alphanumeric(_peek())) {
		_advance();
	}

	int length = _current - _start;

	if (length == 1 && _peek(-1) == '_') {
		// Lone underscore.
		return make_token(Token::UNDERSCORE);
	}

	String name(_start, length);
	if (length < MIN_KEYWORD_LENGTH || length > MAX_KEYWORD_LENGTH) {
		// Cannot be a keyword, as the length doesn't match any.
		return make_identifier(name);
	}

	// Define some helper macros for the switch case.
#define KEYWORD_GROUP_CASE(char) \
	break; \
	case char:
#define KEYWORD(keyword, token_type) \
	{ \
		const int keyword_length = sizeof(keyword) - 1; \
		static_assert(keyword_length <= MAX_KEYWORD_LENGTH, "There's a keyword longer than the defined maximum length"); \
		static_assert(keyword_length >= MIN_KEYWORD_LENGTH, "There's a keyword shorter than the defined minimum length"); \
		if (keyword_length == length && name == keyword) { \
			return make_token(token_type); \
		} \
	}

	// Find if it's a keyword.
	switch (_start[0]) {
		default:
			KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
			break;
	}

	// Check if it's a special literal.
	if (length == 4) {
		if (name == "true") {
			return make_literal(true);
		} else if (name == "null") {
			return make_literal(Variant());
		}
	} else if (length == 5) {
		if (name == "false") {
			return make_literal(false);
		}
	}

	// Not a keyword, so must be an identifier.
	return make_identifier(name);

#undef KEYWORDS
#undef MIN_KEYWORD_LENGTH
#undef MAX_KEYWORD_LENGTH
#undef KEYWORD_GROUP_CASE
#undef KEYWORD
}

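// Added illustration of how the keyword lookup above expands (rough sketch, not
// additional code): KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD) inside the switch
// becomes approximately
//
//     switch (_start[0]) {
//         default:
//             break;
//         case 'a':
//             { if (keyword_length == length && name == "as") { return make_token(Token::AS); } }
//             { if (keyword_length == length && name == "and") { return make_token(Token::AND); } }
//             // ...remaining 'a' keywords...
//             break;
//         case 'b':
//             // ...and so on for each first letter.
//     }
//
// i.e. the first character selects a case label, and only keywords starting
// with that character are compared against the scanned identifier.
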
void GDScriptTokenizer::newline(bool p_make_token) {
	// Don't overwrite previous newline, nor create one if this is a line continuation.
	if (p_make_token && !pending_newline && !line_continuation) {
		Token newline(Token::NEWLINE);
		newline.start_line = line;
		newline.end_line = line;
		newline.start_column = column - 1;
		newline.end_column = column;
		newline.leftmost_column = newline.start_column;
		newline.rightmost_column = newline.end_column;
		pending_newline = true;
		last_newline = newline;
	}

	// Increment line/column counters.
	line++;
	column = 1;
	leftmost_column = 1;
}

GDScriptTokenizer::Token GDScriptTokenizer::number() {
	int base = 10;
	bool has_decimal = false;
	bool has_exponent = false;
	bool has_error = false;
	bool (*digit_check_func)(CharType) = _is_digit;

	if (_peek(-1) == '.') {
		has_decimal = true;
	} else if (_peek(-1) == '0') {
		if (_peek() == 'x') {
			// Hexadecimal.
			base = 16;
			digit_check_func = _is_hex_digit;
			_advance();
		} else if (_peek() == 'b') {
			// Binary.
			base = 2;
			digit_check_func = _is_binary_digit;
			_advance();
		}
	}

	// Allow '_' to be used in a number, for readability.
	while (digit_check_func(_peek()) || _peek() == '_') {
		_advance();
	}

	// It might be a ".." token (instead of a decimal point), so we check that it's not.
	if (_peek() == '.' && _peek(1) != '.') {
		if (base == 10 && !has_decimal) {
			has_decimal = true;
		} else if (base == 10) {
			Token error = make_error("Cannot use a decimal point twice in a number.");
			error.start_column = column;
			error.leftmost_column = column;
			error.end_column = column + 1;
			error.rightmost_column = column + 1;
			push_error(error);
			has_error = true;
		} else if (base == 16) {
			Token error = make_error("Cannot use a decimal point in a hexadecimal number.");
			error.start_column = column;
			error.leftmost_column = column;
			error.end_column = column + 1;
			error.rightmost_column = column + 1;
			push_error(error);
			has_error = true;
		} else {
			Token error = make_error("Cannot use a decimal point in a binary number.");
			error.start_column = column;
			error.leftmost_column = column;
			error.end_column = column + 1;
			error.rightmost_column = column + 1;
			push_error(error);
			has_error = true;
		}
		if (!has_error) {
			_advance();

			// Consume decimal digits.
			while (_is_digit(_peek()) || _peek() == '_') {
				_advance();
			}
		}
	}
	if (base == 10) {
		if (_peek() == 'e' || _peek() == 'E') {
			has_exponent = true;
			_advance();
			if (_peek() == '+' || _peek() == '-') {
				// Exponent sign.
				_advance();
			}
			// Consume exponent digits.
			while (_is_digit(_peek()) || _peek() == '_') {
				_advance();
			}
		}
	}

	// Detect extra decimal point.
	if (!has_error && has_decimal && _peek() == '.' && _peek(1) != '.') {
		Token error = make_error("Cannot use a decimal point twice in a number.");
		error.start_column = column;
		error.leftmost_column = column;
		error.end_column = column + 1;
		error.rightmost_column = column + 1;
		push_error(error);
		has_error = true;
	} else if (_is_alphanumeric(_peek())) {
		// Letter at the end of the number.
		push_error("Invalid numeric notation.");
	}

	// Create a string with the whole number.
	int length = _current - _start;
	String number = String(_start, length).replace("_", "");

	// Convert to the appropriate literal type.
	if (base == 16) {
		int64_t value = number.hex_to_int();
		return make_literal(value);
	} else if (base == 2) {
		int64_t value = number.bin_to_int();
		return make_literal(value);
	} else if (has_decimal || has_exponent) {
		double value = number.to_double();
		return make_literal(value);
	} else {
		int64_t value = number.to_int();
		return make_literal(value);
	}
}

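// Added summary of the literal forms accepted by number() above: decimal
// integers and floats (optionally with a '.' and/or an 'e'/'E' exponent),
// hexadecimal with a "0x" prefix, binary with a "0b" prefix, and '_'
// separators among the digits (stripped before conversion). For example,
// "1_000_000", "0xFF_FF" and "1.5e-3" each come out as a single LITERAL token.
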
GDScriptTokenizer::Token GDScriptTokenizer::string() {
	enum StringType {
		STRING_REGULAR,
		STRING_NAME,
		STRING_NODEPATH,
	};

	bool is_multiline = false;
	StringType type = STRING_REGULAR;

	if (_peek(-1) == '&') {
		type = STRING_NAME;
		_advance();
	} else if (_peek(-1) == '^') {
		type = STRING_NODEPATH;
		_advance();
	}

	CharType quote_char = _peek(-1);

	if (_peek() == quote_char && _peek(1) == quote_char) {
		is_multiline = true;
		// Consume all quotes.
		_advance();
		_advance();
	}

	String result;

	for (;;) {
		// Consume actual string.
		if (_is_at_end()) {
			return make_error("Unterminated string.");
		}

		CharType ch = _peek();

		if (ch == '\\') {
			// Escape pattern.
			_advance();
			if (_is_at_end()) {
				return make_error("Unterminated string.");
			}

			// Grab escape character.
			CharType code = _peek();
			_advance();
			if (_is_at_end()) {
				return make_error("Unterminated string.");
			}

			CharType escaped = 0;
			bool valid_escape = true;

			switch (code) {
				case 'a':
					escaped = '\a';
					break;
				case 'b':
					escaped = '\b';
					break;
				case 'f':
					escaped = '\f';
					break;
				case 'n':
					escaped = '\n';
					break;
				case 'r':
					escaped = '\r';
					break;
				case 't':
					escaped = '\t';
					break;
				case 'v':
					escaped = '\v';
					break;
				case '\'':
					escaped = '\'';
					break;
				case '\"':
					escaped = '\"';
					break;
				case '\\':
					escaped = '\\';
					break;
				case 'u':
					// Hexadecimal sequence.
					for (int i = 0; i < 4; i++) {
						if (_is_at_end()) {
							return make_error("Unterminated string.");
						}

						CharType digit = _peek();
						CharType value = 0;
						if (digit >= '0' && digit <= '9') {
							value = digit - '0';
						} else if (digit >= 'a' && digit <= 'f') {
							value = digit - 'a';
							value += 10;
						} else if (digit >= 'A' && digit <= 'F') {
							value = digit - 'A';
							value += 10;
						} else {
							// Make error, but keep parsing the string.
							Token error = make_error("Invalid hexadecimal digit in unicode escape sequence.");
							error.start_column = column;
							error.leftmost_column = error.start_column;
							error.end_column = column + 1;
							error.rightmost_column = error.end_column;
							push_error(error);
							valid_escape = false;
							break;
						}

						escaped <<= 4;
						escaped |= value;

						_advance();
					}
					break;
				case '\r':
					if (_peek() != '\n') {
						// Carriage return without newline in string. (???)
						// Just add it to the string and keep going.
						result += ch;
						_advance();
						break;
					}
					[[fallthrough]];
				case '\n':
					// Escaping newline.
					newline(false);
					valid_escape = false; // Don't add to the string.
					break;
				default:
					Token error = make_error("Invalid escape in string.");
					error.start_column = column - 2;
					error.leftmost_column = error.start_column;
					push_error(error);
					valid_escape = false;
					break;
			}

			if (valid_escape) {
				result += escaped;
			}
		} else if (ch == quote_char) {
			_advance();
			if (is_multiline) {
				if (_peek() == quote_char && _peek(1) == quote_char) {
					// Ended the multiline string. Consume all quotes.
					_advance();
					_advance();
					break;
				}
			} else {
				// Ended single-line string.
				break;
			}
		} else {
			result += ch;
			_advance();
			if (ch == '\n') {
				newline(false);
			}
		}
	}

	// Make the literal.
	Variant string;
	switch (type) {
		case STRING_NAME:
			string = StringName(result);
			break;
		case STRING_NODEPATH:
			string = NodePath(result);
			break;
		case STRING_REGULAR:
			string = result;
			break;
	}

	return make_literal(string);
}

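// Added summary of the string forms handled by string() above: regular single-
// or double-quoted strings, triple-quoted multiline strings, and prefixed
// literals where a leading '&' yields a StringName and a leading '^' yields a
// NodePath (see the '&' and '^' cases in scan() below, which route into this
// function).
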
void GDScriptTokenizer::check_indent() {
	ERR_FAIL_COND_MSG(column != 1, "Checking tokenizer indentation in the middle of a line.");

	if (_is_at_end()) {
		// Send dedents for every indent level.
		pending_indents -= indent_level();
		indent_stack.clear();
		return;
	}

	for (;;) {
		CharType current_indent_char = _peek();
		int indent_count = 0;

		if (current_indent_char != ' ' && current_indent_char != '\t' && current_indent_char != '\r' && current_indent_char != '\n' && current_indent_char != '#') {
			// First character of the line is not whitespace, so we clear all indentation levels.
			// Unless we are in a continuation or in multiline mode (inside expression).
			if (line_continuation || multiline_mode) {
				return;
			}
			pending_indents -= indent_level();
			indent_stack.clear();
			return;
		}

		if (_peek() == '\r') {
			_advance();
			if (_peek() != '\n') {
				push_error("Stray carriage return character in source code.");
			}
		}
		if (_peek() == '\n') {
			// Empty line, keep going.
			_advance();
			newline(false);
			continue;
		}

		// Check indent level.
		bool mixed = false;
		while (!_is_at_end()) {
			CharType space = _peek();
			if (space == '\t') {
				// Consider individual tab columns.
				column += tab_size - 1;
				indent_count += tab_size;
			} else if (space == ' ') {
				indent_count += 1;
			} else {
				break;
			}
			mixed = mixed || space != current_indent_char;
			_advance();
		}

		if (mixed) {
			Token error = make_error("Mixed use of tabs and spaces for indentation.");
			error.start_line = line;
			error.start_column = 1;
			error.leftmost_column = 1;
			error.rightmost_column = column;
			push_error(error);
		}

		if (_is_at_end()) {
			// Reached the end with an empty line, so just dedent as much as needed.
			pending_indents -= indent_level();
			indent_stack.clear();
			return;
		}

		if (_peek() == '\r') {
			_advance();
			if (_peek() != '\n') {
				push_error("Stray carriage return character in source code.");
			}
		}
		if (_peek() == '\n') {
			// Empty line, keep going.
			_advance();
			newline(false);
			continue;
		}
		if (_peek() == '#') {
			// Comment. Advance to the next line.
			while (_peek() != '\n' && !_is_at_end()) {
				_advance();
			}
			if (_is_at_end()) {
				// Reached the end with an empty line, so just dedent as much as needed.
				pending_indents -= indent_level();
				indent_stack.clear();
				return;
			}
			_advance(); // Consume '\n'.
			newline(false);
			continue;
		}

		if (line_continuation || multiline_mode) {
			// We cleared up all the whitespace at the beginning of the line.
			// If this is a line continuation or we're in multiline mode, we don't want any indentation change.
			return;
		}

		// Check if the indentation character is consistent.
		if (indent_char == '\0') {
			// First time indenting, choose character now.
			indent_char = current_indent_char;
		} else if (current_indent_char != indent_char) {
			Token error = make_error(vformat("Used \"%s\" for indentation instead of \"%s\" as used before in the file.", String(&current_indent_char, 1).c_escape(), String(&indent_char, 1).c_escape()));
			error.start_line = line;
			error.start_column = 1;
			error.leftmost_column = 1;
			error.rightmost_column = column;
			push_error(error);
		}

		// Now we can do actual indentation changes.

		// Check if indent or dedent.
		int previous_indent = 0;
		if (indent_level() > 0) {
			previous_indent = indent_stack.back()->get();
		}
		if (indent_count == previous_indent) {
			// No change in indentation.
			return;
		}
		if (indent_count > previous_indent) {
			// Indentation increased.
			indent_stack.push_back(indent_count);
			pending_indents++;
		} else {
			// Indentation decreased (dedent).
			if (indent_level() == 0) {
				push_error("Tokenizer bug: trying to dedent without previous indent.");
				return;
			}
			while (indent_level() > 0 && indent_stack.back()->get() > indent_count) {
				indent_stack.pop_back();
				pending_indents--;
			}
			if ((indent_level() > 0 && indent_stack.back()->get() != indent_count) || (indent_level() == 0 && indent_count != 0)) {
				// Mismatched indentation alignment.
				Token error = make_error("Unindent doesn't match the previous indentation level.");
				error.start_line = line;
				error.start_column = 1;
				error.leftmost_column = 1;
				error.end_column = column + 1;
				error.rightmost_column = column + 1;
				push_error(error);
				// Still, we'll be lenient and keep going, so keep this level in the stack.
				indent_stack.push_back(indent_count);
			}
		}
		break; // Get out of the loop in any case.
	}
}

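// Added note on the indentation bookkeeping above: indent_stack holds the
// column widths of the currently open indentation levels, and pending_indents
// counts how many INDENT (positive) or DEDENT (negative) tokens still need to
// be emitted; scan() drains that counter one token per call before scanning
// anything else.
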
void GDScriptTokenizer::_skip_whitespace() {
	if (pending_indents != 0) {
		// Still have some indent/dedent tokens to give.
		return;
	}

	bool is_bol = column == 1; // Beginning of line.

	if (is_bol) {
		check_indent();
		return;
	}

	for (;;) {
		CharType c = _peek();
		switch (c) {
			case ' ':
				_advance();
				break;
			case '\t':
				_advance();
				// Consider individual tab columns.
				column += tab_size - 1;
				break;
			case '\r':
				_advance(); // Consume either way.
				if (_peek() != '\n') {
					push_error("Stray carriage return character in source code.");
					return;
				}
				break;
			case '\n':
				_advance();
				newline(!is_bol); // Don't create new line token if line is empty.
				check_indent();
				break;
			case '#':
				// Comment.
				while (_peek() != '\n' && !_is_at_end()) {
					_advance();
				}
				if (_is_at_end()) {
					return;
				}
				_advance(); // Consume '\n'.
				newline(!is_bol);
				check_indent();
				break;
			default:
				return;
		}
	}
}

GDScriptTokenizer::Token GDScriptTokenizer::scan() {
	if (has_error()) {
		return pop_error();
	}

	_skip_whitespace();

	if (pending_newline) {
		pending_newline = false;
		if (!multiline_mode) {
			// Don't return newline tokens in multiline mode.
			return last_newline;
		}
	}

	// Check for potential errors after skipping whitespace.
	if (has_error()) {
		return pop_error();
	}

	_start = _current;
	start_line = line;
	start_column = column;
	leftmost_column = column;
	rightmost_column = column;

	if (pending_indents != 0) {
		// Adjust position for indent.
		_start -= start_column - 1;
		start_column = 1;
		leftmost_column = 1;
		if (pending_indents > 0) {
			// Indents.
			pending_indents--;
			return make_token(Token::INDENT);
		} else {
			// Dedents.
			pending_indents++;
			Token dedent = make_token(Token::DEDENT);
			dedent.end_column += 1;
			dedent.rightmost_column += 1;
			return dedent;
		}
	}

	if (_is_at_end()) {
		return make_token(Token::TK_EOF);
	}

	const CharType c = _advance();

	if (c == '\\') {
		// Line continuation with backslash.
		if (_peek() == '\r') {
			if (_peek(1) != '\n') {
				return make_error("Unexpected carriage return character.");
			}
			_advance();
		}
		if (_peek() != '\n') {
			return make_error("Expected new line after \"\\\".");
		}
		_advance();
		newline(false);
		line_continuation = true;
		return scan(); // Recurse to get the next token.
	}

	line_continuation = false;

	if (_is_digit(c)) {
		return number();
	} else if (_is_alphanumeric(c)) {
		return potential_identifier();
	}

	switch (c) {
		// String literals.
		case '"':
		case '\'':
			return string();

		// Annotation.
		case '@':
			return annotation();

		// Single characters.
		case '~':
			return make_token(Token::TILDE);
		case ',':
			return make_token(Token::COMMA);
		case ':':
			return make_token(Token::COLON);
		case ';':
			return make_token(Token::SEMICOLON);
		case '$':
			return make_token(Token::DOLLAR);
		case '?':
			return make_token(Token::QUESTION_MARK);
		case '`':
			return make_token(Token::BACKTICK);

		// Parens.
		case '(':
			push_paren('(');
			return make_token(Token::PARENTHESIS_OPEN);
		case '[':
			push_paren('[');
			return make_token(Token::BRACKET_OPEN);
		case '{':
			push_paren('{');
			return make_token(Token::BRACE_OPEN);
		case ')':
			if (!pop_paren('(')) {
				return make_paren_error(c);
			}
			return make_token(Token::PARENTHESIS_CLOSE);
		case ']':
			if (!pop_paren('[')) {
				return make_paren_error(c);
			}
			return make_token(Token::BRACKET_CLOSE);
		case '}':
			if (!pop_paren('{')) {
				return make_paren_error(c);
			}
			return make_token(Token::BRACE_CLOSE);

		// Double characters.
		case '!':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::BANG_EQUAL);
			} else {
				return make_token(Token::BANG);
			}
		case '.':
			if (_peek() == '.') {
				_advance();
				return make_token(Token::PERIOD_PERIOD);
			} else if (_is_digit(_peek())) {
				// Number starting with '.'.
				return number();
			} else {
				return make_token(Token::PERIOD);
			}
		case '+':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::PLUS_EQUAL);
			} else {
				return make_token(Token::PLUS);
			}
		case '-':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::MINUS_EQUAL);
			} else if (_peek() == '>') {
				_advance();
				return make_token(Token::FORWARD_ARROW);
			} else {
				return make_token(Token::MINUS);
			}
		case '*':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::STAR_EQUAL);
			} else {
				return make_token(Token::STAR);
			}
		case '/':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::SLASH_EQUAL);
			} else {
				return make_token(Token::SLASH);
			}
		case '%':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::PERCENT_EQUAL);
			} else {
				return make_token(Token::PERCENT);
			}
		case '^':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::CARET_EQUAL);
			} else if (_peek() == '"' || _peek() == '\'') {
				// Node path.
				return string();
			} else {
				return make_token(Token::CARET);
			}
		case '&':
			if (_peek() == '&') {
				_advance();
				return make_token(Token::AMPERSAND_AMPERSAND);
			} else if (_peek() == '=') {
				_advance();
				return make_token(Token::AMPERSAND_EQUAL);
			} else if (_peek() == '"' || _peek() == '\'') {
				// StringName.
				return string();
			} else {
				return make_token(Token::AMPERSAND);
			}
		case '|':
			if (_peek() == '|') {
				_advance();
				return make_token(Token::PIPE_PIPE);
			} else if (_peek() == '=') {
				_advance();
				return make_token(Token::PIPE_EQUAL);
			} else {
				return make_token(Token::PIPE);
			}

		// Potential VCS conflict markers.
		case '=':
			if (_peek() == '=') {
				return check_vcs_marker('=', Token::EQUAL_EQUAL);
			} else {
				return make_token(Token::EQUAL);
			}
		case '<':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::LESS_EQUAL);
			} else if (_peek() == '<') {
				if (_peek(1) == '=') {
					_advance();
					_advance(); // Advance both '<' and '='.
					return make_token(Token::LESS_LESS_EQUAL);
				} else {
					return check_vcs_marker('<', Token::LESS_LESS);
				}
			} else {
				return make_token(Token::LESS);
			}
		case '>':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::GREATER_EQUAL);
			} else if (_peek() == '>') {
				if (_peek(1) == '=') {
					_advance();
					_advance(); // Advance both '>' and '='.
					return make_token(Token::GREATER_GREATER_EQUAL);
				} else {
					return check_vcs_marker('>', Token::GREATER_GREATER);
				}
			} else {
				return make_token(Token::GREATER);
			}

		default:
			return make_error(vformat(R"(Unknown character "%s".)", String(&c, 1)));
	}
}

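// Rough illustration (assumed typical output, not normative): for a line such
// as `var health = 100`, successive scan() calls yield approximately
// VAR, IDENTIFIER "health", EQUAL, LITERAL 100, NEWLINE and finally an
// end-of-file token, with INDENT/DEDENT tokens appearing only when the
// indentation level changes between lines.
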
GDScriptTokenizer::GDScriptTokenizer() {
#ifdef TOOLS_ENABLED
	if (EditorSettings::get_singleton()) {
		tab_size = EditorSettings::get_singleton()->get_setting("text_editor/indent/size");
	}
#endif // TOOLS_ENABLED
}
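
// Illustrative usage sketch (not part of this file; the printing shown is an
// assumption for demonstration only): a consumer such as the GDScript parser
// drives the tokenizer roughly like this, pulling tokens until end of file.
//
//     GDScriptTokenizer tokenizer;
//     tokenizer.set_source_code(source);
//     GDScriptTokenizer::Token token = tokenizer.scan();
//     while (token.type != GDScriptTokenizer::Token::TK_EOF) {
//         print_line(token.get_name());
//         token = tokenizer.scan();
//     }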