/*************************************************************************/
/* gdscript_tokenizer.cpp */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/

#include "gdscript_tokenizer.h"

#include "core/error_macros.h"

#ifdef TOOLS_ENABLED
#include "editor/editor_settings.h"
#endif

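// Human-readable token names, indexed by Token::Type.
// The static_assert after the table keeps it in sync with the enum.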
static const char *token_names[] = {
	"Empty", // EMPTY,
	// Basic
	"Annotation", // ANNOTATION
	"Identifier", // IDENTIFIER,
	"Literal", // LITERAL,
	// Comparison
	"<", // LESS,
	"<=", // LESS_EQUAL,
	">", // GREATER,
	">=", // GREATER_EQUAL,
	"==", // EQUAL_EQUAL,
	"!=", // BANG_EQUAL,
	// Logical
	"and", // AND,
	"or", // OR,
	"not", // NOT,
	"&&", // AMPERSAND_AMPERSAND,
	"||", // PIPE_PIPE,
	"!", // BANG,
	// Bitwise
	"&", // AMPERSAND,
	"|", // PIPE,
	"~", // TILDE,
	"^", // CARET,
	"<<", // LESS_LESS,
	">>", // GREATER_GREATER,
	// Math
	"+", // PLUS,
	"-", // MINUS,
	"*", // STAR,
	"/", // SLASH,
	"%", // PERCENT,
	// Assignment
	"=", // EQUAL,
	"+=", // PLUS_EQUAL,
	"-=", // MINUS_EQUAL,
	"*=", // STAR_EQUAL,
	"/=", // SLASH_EQUAL,
	"%=", // PERCENT_EQUAL,
	"<<=", // LESS_LESS_EQUAL,
	">>=", // GREATER_GREATER_EQUAL,
	"&=", // AMPERSAND_EQUAL,
	"|=", // PIPE_EQUAL,
	"^=", // CARET_EQUAL,
	// Control flow
	"if", // IF,
	"elif", // ELIF,
	"else", // ELSE,
	"for", // FOR,
	"while", // WHILE,
	"break", // BREAK,
	"continue", // CONTINUE,
	"pass", // PASS,
	"return", // RETURN,
	"match", // MATCH,
	// Keywords
	"as", // AS,
	"assert", // ASSERT,
	"await", // AWAIT,
	"breakpoint", // BREAKPOINT,
	"class", // CLASS,
	"class_name", // CLASS_NAME,
	"const", // CONST,
	"enum", // ENUM,
	"extends", // EXTENDS,
	"func", // FUNC,
	"in", // IN,
	"is", // IS,
	"namespace", // NAMESPACE
	"preload", // PRELOAD,
	"self", // SELF,
	"signal", // SIGNAL,
	"static", // STATIC,
	"super", // SUPER,
	"trait", // TRAIT,
	"var", // VAR,
	"void", // VOID,
	"yield", // YIELD,
	// Punctuation
	"[", // BRACKET_OPEN,
	"]", // BRACKET_CLOSE,
	"{", // BRACE_OPEN,
	"}", // BRACE_CLOSE,
	"(", // PARENTHESIS_OPEN,
	")", // PARENTHESIS_CLOSE,
	",", // COMMA,
	";", // SEMICOLON,
	".", // PERIOD,
	"..", // PERIOD_PERIOD,
	":", // COLON,
	"$", // DOLLAR,
	"->", // FORWARD_ARROW,
	"_", // UNDERSCORE,
	// Whitespace
	"Newline", // NEWLINE,
	"Indent", // INDENT,
	"Dedent", // DEDENT,
	// Constants
	"PI", // CONST_PI,
	"TAU", // CONST_TAU,
	"INF", // CONST_INF,
	"NaN", // CONST_NAN,
	// Error message improvement
	"VCS conflict marker", // VCS_CONFLICT_MARKER,
	"`", // BACKTICK,
	"?", // QUESTION_MARK,
	// Special
	"Error", // ERROR,
	"End of file", // EOF,
};

// Avoid desync.
static_assert(sizeof(token_names) / sizeof(token_names[0]) == GDScriptTokenizer::Token::TK_MAX, "Amount of token names doesn't match the amount of token types.");

const char *GDScriptTokenizer::Token::get_name() const {
	ERR_FAIL_INDEX_V_MSG(type, TK_MAX, "<error>", "Using token type out of the enum.");
	return token_names[type];
}

bool GDScriptTokenizer::Token::is_identifier() const {
	// Note: Most keywords should not be recognized as identifiers.
	// These are only exceptions for stuff that already is on the engine's API.
	switch (type) {
		case IDENTIFIER:
		case MATCH: // Used in String.match().
			return true;
		default:
			return false;
	}
}

String GDScriptTokenizer::get_token_name(Token::Type p_token_type) {
	ERR_FAIL_INDEX_V_MSG(p_token_type, Token::TK_MAX, "<error>", "Using token type out of the enum.");
	return token_names[p_token_type];
}

void GDScriptTokenizer::set_source_code(const String &p_source_code) {
	source = p_source_code;
	if (source.empty()) {
		_source = L"";
	} else {
		_source = source.ptr();
	}
	_current = _source;
	line = 1;
	column = 1;
	length = p_source_code.length();
	position = 0;
}

void GDScriptTokenizer::set_cursor_position(int p_line, int p_column) {
	cursor_line = p_line;
	cursor_column = p_column;
}

void GDScriptTokenizer::set_multiline_mode(bool p_state) {
	multiline_mode = p_state;
}

int GDScriptTokenizer::get_cursor_line() const {
	return cursor_line;
}

int GDScriptTokenizer::get_cursor_column() const {
	return cursor_column;
}

bool GDScriptTokenizer::is_past_cursor() const {
	if (line < cursor_line) {
		return false;
	}
	if (line > cursor_line) {
		return true;
	}
	if (column < cursor_column) {
		return false;
	}
	return true;
}

CharType GDScriptTokenizer::_advance() {
	if (unlikely(_is_at_end())) {
		return '\0';
	}
	_current++;
	column++;
	position++;
	if (column > rightmost_column) {
		rightmost_column = column;
	}
	if (unlikely(_is_at_end())) {
		// Add extra newline even if it's not there, to satisfy the parser.
		newline(true);
		// Also add needed unindent.
		check_indent();
	}
	return _peek(-1);
}

void GDScriptTokenizer::push_paren(CharType p_char) {
	paren_stack.push_back(p_char);
}

bool GDScriptTokenizer::pop_paren(CharType p_expected) {
	if (paren_stack.empty()) {
		return false;
	}
	CharType actual = paren_stack.back()->get();
	paren_stack.pop_back();
	return actual == p_expected;
}

GDScriptTokenizer::Token GDScriptTokenizer::pop_error() {
	Token error = error_stack.back()->get();
	error_stack.pop_back();
	return error;
}

static bool _is_alphanumeric(CharType c) {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_';
}

static bool _is_digit(CharType c) {
	return (c >= '0' && c <= '9');
}

static bool _is_hex_digit(CharType c) {
	return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F');
}

static bool _is_binary_digit(CharType c) {
	return (c == '0' || c == '1');
}

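// Builds a token of the given type from the text between _start and _current,
// recording its line/column extents. If a cursor position was set through
// set_cursor_position(), it also records where that cursor falls relative to
// the token (beginning, middle, or end).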
GDScriptTokenizer::Token GDScriptTokenizer::make_token(Token::Type p_type) {
	Token token(p_type);
	token.start_line = start_line;
	token.end_line = line;
	token.start_column = start_column;
	token.end_column = column;
	token.leftmost_column = leftmost_column;
	token.rightmost_column = rightmost_column;
	token.source = String(_start, _current - _start);

	if (p_type != Token::ERROR && cursor_line > -1) {
		// Also count whitespace after token.
		int offset = 0;
		while (_peek(offset) == ' ' || _peek(offset) == '\t') {
			offset++;
		}
		int last_column = column + offset;
		// Check cursor position in token.
		if (start_line == line) {
			// Single line token.
			if (cursor_line == start_line && cursor_column >= start_column && cursor_column <= last_column) {
				token.cursor_position = cursor_column - start_column;
				if (cursor_column == start_column) {
					token.cursor_place = CURSOR_BEGINNING;
				} else if (cursor_column < column) {
					token.cursor_place = CURSOR_MIDDLE;
				} else {
					token.cursor_place = CURSOR_END;
				}
			}
		} else {
			// Multi line token.
			if (cursor_line == start_line && cursor_column >= start_column) {
				// Is in first line.
				token.cursor_position = cursor_column - start_column;
				if (cursor_column == start_column) {
					token.cursor_place = CURSOR_BEGINNING;
				} else {
					token.cursor_place = CURSOR_MIDDLE;
				}
			} else if (cursor_line == line && cursor_column <= last_column) {
				// Is in last line.
				token.cursor_position = cursor_column - start_column;
				if (cursor_column < column) {
					token.cursor_place = CURSOR_MIDDLE;
				} else {
					token.cursor_place = CURSOR_END;
				}
			} else if (cursor_line > start_line && cursor_line < line) {
				// Is in middle line.
				token.cursor_place = CURSOR_MIDDLE;
			}
		}
	}

	return token;
}

GDScriptTokenizer::Token GDScriptTokenizer::make_literal(const Variant &p_literal) {
	Token token = make_token(Token::LITERAL);
	token.literal = p_literal;
	return token;
}

GDScriptTokenizer::Token GDScriptTokenizer::make_identifier(const StringName &p_identifier) {
	Token identifier = make_token(Token::IDENTIFIER);
	identifier.literal = p_identifier;
	return identifier;
}

GDScriptTokenizer::Token GDScriptTokenizer::make_error(const String &p_message) {
	Token error = make_token(Token::ERROR);
	error.literal = p_message;
	return error;
}

void GDScriptTokenizer::push_error(const String &p_message) {
	Token error = make_error(p_message);
	error_stack.push_back(error);
}

void GDScriptTokenizer::push_error(const Token &p_error) {
	error_stack.push_back(p_error);
}

GDScriptTokenizer::Token GDScriptTokenizer::make_paren_error(CharType p_paren) {
	if (paren_stack.empty()) {
		return make_error(vformat("Closing \"%c\" doesn't have an opening counterpart.", p_paren));
	}
	Token error = make_error(vformat("Closing \"%c\" doesn't match the opening \"%c\".", p_paren, paren_stack.back()->get()));
	paren_stack.pop_back(); // Remove opening one anyway.
	return error;
}

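// Called when a double-character token ("==", "<<" or ">>") could also start a
// VCS conflict marker ("=======", "<<<<<<<", ">>>>>>>"): seven or more repeated
// characters are reported as a conflict marker instead of an operator.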
GDScriptTokenizer::Token GDScriptTokenizer::check_vcs_marker(CharType p_test, Token::Type p_double_type) {
	const CharType *next = _current + 1;
	int chars = 2; // Two already matched.

	// Test before consuming characters, since we don't want to consume more than needed.
	while (*next == p_test) {
		chars++;
		next++;
	}
	if (chars >= 7) {
		// It is a VCS conflict marker.
		while (chars > 1) {
			// Consume all characters (first was already consumed by scan()).
			_advance();
			chars--;
		}
		return make_token(Token::VCS_CONFLICT_MARKER);
	} else {
		// It is only a regular double character token, so we consume the second character.
		_advance();
		return make_token(p_double_type);
	}
}

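// Scans an annotation name; the leading '@' was already consumed by scan().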
GDScriptTokenizer::Token GDScriptTokenizer::annotation() {
	if (!_is_alphanumeric(_peek())) {
		push_error("Expected annotation identifier after \"@\".");
	}
	while (_is_alphanumeric(_peek())) {
		// Consume all identifier characters.
		_advance();
	}
	Token annotation = make_token(Token::ANNOTATION);
	annotation.literal = StringName(annotation.source);
	return annotation;
}

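// Scans a word and decides whether it is a keyword, a named constant (PI, TAU,
// INF, NAN), a special literal (true/false/null), a lone underscore, or a plain
// identifier. Keywords are matched via the KEYWORDS X-macro below, grouped by
// first letter and pre-filtered by length.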
GDScriptTokenizer::Token GDScriptTokenizer::potential_identifier() {
#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
	KEYWORD_GROUP('a') \
	KEYWORD("as", Token::AS) \
	KEYWORD("and", Token::AND) \
	KEYWORD("assert", Token::ASSERT) \
	KEYWORD("await", Token::AWAIT) \
	KEYWORD_GROUP('b') \
	KEYWORD("break", Token::BREAK) \
	KEYWORD("breakpoint", Token::BREAKPOINT) \
	KEYWORD_GROUP('c') \
	KEYWORD("class", Token::CLASS) \
	KEYWORD("class_name", Token::CLASS_NAME) \
	KEYWORD("const", Token::CONST) \
	KEYWORD("continue", Token::CONTINUE) \
	KEYWORD_GROUP('e') \
	KEYWORD("elif", Token::ELIF) \
	KEYWORD("else", Token::ELSE) \
	KEYWORD("enum", Token::ENUM) \
	KEYWORD("extends", Token::EXTENDS) \
	KEYWORD_GROUP('f') \
	KEYWORD("for", Token::FOR) \
	KEYWORD("func", Token::FUNC) \
	KEYWORD_GROUP('i') \
	KEYWORD("if", Token::IF) \
	KEYWORD("in", Token::IN) \
	KEYWORD("is", Token::IS) \
	KEYWORD_GROUP('m') \
	KEYWORD("match", Token::MATCH) \
	KEYWORD_GROUP('n') \
	KEYWORD("namespace", Token::NAMESPACE) \
	KEYWORD("not", Token::NOT) \
	KEYWORD_GROUP('o') \
	KEYWORD("or", Token::OR) \
	KEYWORD_GROUP('p') \
	KEYWORD("pass", Token::PASS) \
	KEYWORD("preload", Token::PRELOAD) \
	KEYWORD_GROUP('r') \
	KEYWORD("return", Token::RETURN) \
	KEYWORD_GROUP('s') \
	KEYWORD("self", Token::SELF) \
	KEYWORD("signal", Token::SIGNAL) \
	KEYWORD("static", Token::STATIC) \
	KEYWORD("super", Token::SUPER) \
	KEYWORD_GROUP('t') \
	KEYWORD("trait", Token::TRAIT) \
	KEYWORD_GROUP('v') \
	KEYWORD("var", Token::VAR) \
	KEYWORD("void", Token::VOID) \
	KEYWORD_GROUP('w') \
	KEYWORD("while", Token::WHILE) \
	KEYWORD_GROUP('y') \
	KEYWORD("yield", Token::YIELD) \
	KEYWORD_GROUP('I') \
	KEYWORD("INF", Token::CONST_INF) \
	KEYWORD_GROUP('N') \
	KEYWORD("NAN", Token::CONST_NAN) \
	KEYWORD_GROUP('P') \
	KEYWORD("PI", Token::CONST_PI) \
	KEYWORD_GROUP('T') \
	KEYWORD("TAU", Token::CONST_TAU)

#define MIN_KEYWORD_LENGTH 2
#define MAX_KEYWORD_LENGTH 10

	// Consume all alphanumeric characters.
	while (_is_alphanumeric(_peek())) {
		_advance();
	}

	int length = _current - _start;

	if (length == 1 && _peek(-1) == '_') {
		// Lone underscore.
		return make_token(Token::UNDERSCORE);
	}

	String name(_start, length);
	if (length < MIN_KEYWORD_LENGTH || length > MAX_KEYWORD_LENGTH) {
		// Cannot be a keyword, as the length doesn't match any.
		return make_identifier(name);
	}

	// Define some helper macros for the switch case.
#define KEYWORD_GROUP_CASE(char) \
	break; \
	case char:
#define KEYWORD(keyword, token_type) \
	{ \
		const int keyword_length = sizeof(keyword) - 1; \
		static_assert(keyword_length <= MAX_KEYWORD_LENGTH, "There's a keyword longer than the defined maximum length"); \
		static_assert(keyword_length >= MIN_KEYWORD_LENGTH, "There's a keyword shorter than the defined minimum length"); \
		if (keyword_length == length && name == keyword) { \
			return make_token(token_type); \
		} \
	}

	// Find if it's a keyword.
	switch (_start[0]) {
		default:
			KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
			break;
	}

	// Check if it's a special literal
	if (length == 4) {
		if (name == "true") {
			return make_literal(true);
		} else if (name == "null") {
			return make_literal(Variant());
		}
	} else if (length == 5) {
		if (name == "false") {
			return make_literal(false);
		}
	}

	// Not a keyword, so must be an identifier.
	return make_identifier(name);

#undef KEYWORDS
#undef MIN_KEYWORD_LENGTH
#undef MAX_KEYWORD_LENGTH
#undef KEYWORD_GROUP_CASE
#undef KEYWORD
}

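// Records that a newline was reached and advances the line/column counters.
// The NEWLINE token itself is emitted lazily by scan() (see pending_newline)
// and is suppressed for line continuations.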
void GDScriptTokenizer::newline(bool p_make_token) {
	// Don't overwrite previous newline, nor create one if we want a line continuation.
	if (p_make_token && !pending_newline && !line_continuation) {
		Token newline(Token::NEWLINE);
		newline.start_line = line;
		newline.end_line = line;
		newline.start_column = column - 1;
		newline.end_column = column;
		newline.leftmost_column = newline.start_column;
		newline.rightmost_column = newline.end_column;
		pending_newline = true;
		last_newline = newline;
	}

	// Increment line/column counters.
	line++;
	column = 1;
	leftmost_column = 1;
}

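// Scans a numeric literal: decimal, hexadecimal ("0x") or binary ("0b"), with
// optional '_' separators and, for base 10, a decimal point and/or exponent.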
GDScriptTokenizer::Token GDScriptTokenizer::number() {
	int base = 10;
	bool has_decimal = false;
	bool has_exponent = false;
	bool has_error = false;
	bool (*digit_check_func)(CharType) = _is_digit;

	if (_peek(-1) == '.') {
		has_decimal = true;
	} else if (_peek(-1) == '0') {
		if (_peek() == 'x') {
			// Hexadecimal.
			base = 16;
			digit_check_func = _is_hex_digit;
			_advance();
		} else if (_peek() == 'b') {
			// Binary.
			base = 2;
			digit_check_func = _is_binary_digit;
			_advance();
		}
	}

	// Allow '_' to be used in a number, for readability.
	while (digit_check_func(_peek()) || _peek() == '_') {
		_advance();
	}

	// It might be a ".." token (instead of decimal point) so we check if it's not.
	if (_peek() == '.' && _peek(1) != '.') {
		if (base == 10 && !has_decimal) {
			has_decimal = true;
		} else if (base == 10) {
			Token error = make_error("Cannot use a decimal point twice in a number.");
			error.start_column = column;
			error.leftmost_column = column;
			error.end_column = column + 1;
			error.rightmost_column = column + 1;
			push_error(error);
			has_error = true;
		} else if (base == 16) {
			Token error = make_error("Cannot use a decimal point in a hexadecimal number.");
			error.start_column = column;
			error.leftmost_column = column;
			error.end_column = column + 1;
			error.rightmost_column = column + 1;
			push_error(error);
			has_error = true;
		} else {
			Token error = make_error("Cannot use a decimal point in a binary number.");
			error.start_column = column;
			error.leftmost_column = column;
			error.end_column = column + 1;
			error.rightmost_column = column + 1;
			push_error(error);
			has_error = true;
		}
		if (!has_error) {
			_advance();

			// Consume decimal digits.
			while (_is_digit(_peek()) || _peek() == '_') {
				_advance();
			}
		}
	}
	if (base == 10) {
		if (_peek() == 'e' || _peek() == 'E') {
			has_exponent = true;
			_advance();
			if (_peek() == '+' || _peek() == '-') {
				// Exponent sign.
				_advance();
			}
			// Consume exponent digits.
			while (_is_digit(_peek()) || _peek() == '_') {
				_advance();
			}
		}
	}

	// Detect extra decimal point.
	if (!has_error && has_decimal && _peek() == '.' && _peek(1) != '.') {
		Token error = make_error("Cannot use a decimal point twice in a number.");
		error.start_column = column;
		error.leftmost_column = column;
		error.end_column = column + 1;
		error.rightmost_column = column + 1;
		push_error(error);
		has_error = true;
	} else if (_is_alphanumeric(_peek())) {
		// Letter at the end of the number.
		push_error("Invalid numeric notation.");
	}

	// Create a string with the whole number.
	int length = _current - _start;
	String number = String(_start, length).replace("_", "");

	// Convert to the appropriate literal type.
	if (base == 16) {
		int64_t value = number.hex_to_int();
		return make_literal(value);
	} else if (base == 2) {
		int64_t value = number.bin_to_int();
		return make_literal(value);
	} else if (has_decimal || has_exponent) {
		double value = number.to_float();
		return make_literal(value);
	} else {
		int64_t value = number.to_int();
		return make_literal(value);
	}
}

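// Scans a string literal: regular, StringName ('&' prefix) or NodePath ('^'
// prefix), single- or triple-quoted (multiline), including escape sequences.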
GDScriptTokenizer::Token GDScriptTokenizer::string() {
	enum StringType {
		STRING_REGULAR,
		STRING_NAME,
		STRING_NODEPATH,
	};

	bool is_multiline = false;
	StringType type = STRING_REGULAR;

	if (_peek(-1) == '&') {
		type = STRING_NAME;
		_advance();
	} else if (_peek(-1) == '^') {
		type = STRING_NODEPATH;
		_advance();
	}

	CharType quote_char = _peek(-1);

	if (_peek() == quote_char && _peek(1) == quote_char) {
		is_multiline = true;
		// Consume all quotes.
		_advance();
		_advance();
	}

	String result;

	for (;;) {
		// Consume actual string.
		if (_is_at_end()) {
			return make_error("Unterminated string.");
		}

		CharType ch = _peek();

		if (ch == '\\') {
			// Escape pattern.
			_advance();
			if (_is_at_end()) {
				return make_error("Unterminated string.");
			}

			// Grab escape character.
			CharType code = _peek();
			_advance();
			if (_is_at_end()) {
				return make_error("Unterminated string.");
			}

			CharType escaped = 0;
			bool valid_escape = true;
			switch (code) {
				case 'a':
					escaped = '\a';
					break;
				case 'b':
					escaped = '\b';
					break;
				case 'f':
					escaped = '\f';
					break;
				case 'n':
					escaped = '\n';
					break;
				case 'r':
					escaped = '\r';
					break;
				case 't':
					escaped = '\t';
					break;
				case 'v':
					escaped = '\v';
					break;
				case '\'':
					escaped = '\'';
					break;
				case '\"':
					escaped = '\"';
					break;
				case '\\':
					escaped = '\\';
					break;
				case 'u':
					// Hexadecimal sequence.
					for (int i = 0; i < 4; i++) {
						if (_is_at_end()) {
							return make_error("Unterminated string.");
						}

						CharType digit = _peek();
						CharType value = 0;
						if (digit >= '0' && digit <= '9') {
							value = digit - '0';
						} else if (digit >= 'a' && digit <= 'f') {
							value = digit - 'a';
							value += 10;
						} else if (digit >= 'A' && digit <= 'F') {
							value = digit - 'A';
							value += 10;
						} else {
							// Make error, but keep parsing the string.
							Token error = make_error("Invalid hexadecimal digit in unicode escape sequence.");
							error.start_column = column;
							error.leftmost_column = error.start_column;
							error.end_column = column + 1;
							error.rightmost_column = error.end_column;
							push_error(error);
							valid_escape = false;
							break;
						}

						escaped <<= 4;
						escaped |= value;

						_advance();
					}
					break;
				case '\r':
					if (_peek() != '\n') {
						// Carriage return without newline in string. (???)
						// Just add it to the string and keep going.
						result += ch;
						_advance();
						break;
					}
					[[fallthrough]];
				case '\n':
					// Escaping newline.
					newline(false);
					valid_escape = false; // Don't add to the string.
					break;
				default:
					Token error = make_error("Invalid escape in string.");
					error.start_column = column - 2;
					error.leftmost_column = error.start_column;
					push_error(error);
					valid_escape = false;
					break;
			}

			if (valid_escape) {
				result += escaped;
			}
		} else if (ch == quote_char) {
			_advance();
			if (is_multiline) {
				if (_peek() == quote_char && _peek(1) == quote_char) {
					// Ended the multiline string. Consume all quotes.
					_advance();
					_advance();
					break;
				}
			} else {
				// Ended single-line string.
				break;
			}
		} else {
			result += ch;
			_advance();
			if (ch == '\n') {
				newline(false);
			}
		}
	}

	// Make the literal.
	Variant string;
	switch (type) {
		case STRING_NAME:
			string = StringName(result);
			break;
		case STRING_NODEPATH:
			string = NodePath(result);
			break;
		case STRING_REGULAR:
			string = result;
			break;
	}

	return make_literal(string);
}

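// Called at the beginning of a line to track indentation. Keeps a stack of
// indent levels and queues INDENT/DEDENT tokens through pending_indents, while
// reporting mixed or inconsistent use of tabs and spaces.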
void GDScriptTokenizer::check_indent() {
	ERR_FAIL_COND_MSG(column != 1, "Checking tokenizer indentation in the middle of a line.");

	if (_is_at_end()) {
		// Send dedents for every indent level.
		pending_indents -= indent_level();
		indent_stack.clear();
		return;
	}

	for (;;) {
		CharType current_indent_char = _peek();
		int indent_count = 0;

		if (current_indent_char != ' ' && current_indent_char != '\t' && current_indent_char != '\r' && current_indent_char != '\n' && current_indent_char != '#') {
			// First character of the line is not whitespace, so we clear all indentation levels.
			// Unless we are in a continuation or in multiline mode (inside expression).
			if (line_continuation || multiline_mode) {
				return;
			}
			pending_indents -= indent_level();
			indent_stack.clear();
			return;
		}

		if (_peek() == '\r') {
			_advance();
			if (_peek() != '\n') {
				push_error("Stray carriage return character in source code.");
			}
		}
		if (_peek() == '\n') {
			// Empty line, keep going.
			_advance();
			newline(false);
			continue;
		}

		// Check indent level.
		bool mixed = false;
		while (!_is_at_end()) {
			CharType space = _peek();
			if (space == '\t') {
				// Consider individual tab columns.
				column += tab_size - 1;
				indent_count += tab_size;
			} else if (space == ' ') {
				indent_count += 1;
			} else {
				break;
			}
			mixed = mixed || space != current_indent_char;
			_advance();
		}

		if (mixed) {
			Token error = make_error("Mixed use of tabs and spaces for indentation.");
			error.start_line = line;
			error.start_column = 1;
			error.leftmost_column = 1;
			error.rightmost_column = column;
			push_error(error);
		}

		if (_is_at_end()) {
			// Reached the end with an empty line, so just dedent as much as needed.
			pending_indents -= indent_level();
			indent_stack.clear();
			return;
		}

		if (_peek() == '\r') {
			_advance();
			if (_peek() != '\n') {
				push_error("Stray carriage return character in source code.");
			}
		}
		if (_peek() == '\n') {
			// Empty line, keep going.
			_advance();
			newline(false);
			continue;
		}
		if (_peek() == '#') {
			// Comment. Advance to the next line.
			while (_peek() != '\n' && !_is_at_end()) {
				_advance();
			}
			if (_is_at_end()) {
				// Reached the end with an empty line, so just dedent as much as needed.
				pending_indents -= indent_level();
				indent_stack.clear();
				return;
			}
			_advance(); // Consume '\n'.
			newline(false);
			continue;
		}

		if (line_continuation || multiline_mode) {
			// We cleared up all the whitespace at the beginning of the line.
			// If this is a continuation or we are in multiline mode, we don't want any indentation change.
			return;
		}

		// Check if indentation character is consistent.
		if (indent_char == '\0') {
			// First time indenting, choose character now.
			indent_char = current_indent_char;
		} else if (current_indent_char != indent_char) {
			Token error = make_error(vformat("Used \"%c\" for indentation instead of \"%c\" as used before in the file.", String(&current_indent_char, 1).c_escape(), String(&indent_char, 1).c_escape()));
			error.start_line = line;
			error.start_column = 1;
			error.leftmost_column = 1;
			error.rightmost_column = column;
			push_error(error);
		}

		// Now we can do actual indentation changes.
		// Check if indent or dedent.
		int previous_indent = 0;
		if (indent_level() > 0) {
			previous_indent = indent_stack.back()->get();
		}
		if (indent_count == previous_indent) {
			// No change in indentation.
			return;
		}
		if (indent_count > previous_indent) {
			// Indentation increased.
			indent_stack.push_back(indent_count);
			pending_indents++;
		} else {
			// Indentation decreased (dedent).
			if (indent_level() == 0) {
				push_error("Tokenizer bug: trying to dedent without previous indent.");
				return;
			}
			while (indent_level() > 0 && indent_stack.back()->get() > indent_count) {
				indent_stack.pop_back();
				pending_indents--;
			}
			if ((indent_level() > 0 && indent_stack.back()->get() != indent_count) || (indent_level() == 0 && indent_count != 0)) {
				// Mismatched indentation alignment.
				Token error = make_error("Unindent doesn't match the previous indentation level.");
				error.start_line = line;
				error.start_column = 1;
				error.leftmost_column = 1;
				error.end_column = column + 1;
				error.rightmost_column = column + 1;
				push_error(error);
				// Still, we'll be lenient and keep going, so keep this level in the stack.
				indent_stack.push_back(indent_count);
			}
		}
		break; // Get out of the loop in any case.
	}
}

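// Skips spaces, tabs, comments and blank lines between tokens, delegating to
// check_indent() at the beginning of a line.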
void GDScriptTokenizer::_skip_whitespace() {
	if (pending_indents != 0) {
		// Still have some indent/dedent tokens to give.
		return;
	}

	bool is_bol = column == 1; // Beginning of line.

	if (is_bol) {
		check_indent();
		return;
	}

	for (;;) {
		CharType c = _peek();
		switch (c) {
			case ' ':
				_advance();
				break;
			case '\t':
				_advance();
				// Consider individual tab columns.
				column += tab_size - 1;
				break;
			case '\r':
				_advance(); // Consume either way.
				if (_peek() != '\n') {
					push_error("Stray carriage return character in source code.");
					return;
				}
				break;
			case '\n':
				_advance();
				newline(!is_bol); // Don't create new line token if line is empty.
				check_indent();
				break;
			case '#':
				// Comment.
				while (_peek() != '\n' && !_is_at_end()) {
					_advance();
				}
				if (_is_at_end()) {
					return;
				}
				_advance(); // Consume '\n'
				newline(!is_bol);
				check_indent();
				break;
			default:
				return;
		}
	}
}

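// Main entry point: returns the next token, emitting any pending error,
// newline, or indent/dedent tokens first, then dispatching on the next
// character of the source.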
GDScriptTokenizer::Token GDScriptTokenizer::scan() {
	if (has_error()) {
		return pop_error();
	}

	_skip_whitespace();

	if (pending_newline) {
		pending_newline = false;
		if (!multiline_mode) {
			// Don't return newline tokens in multiline mode.
			return last_newline;
		}
	}

	// Check for potential errors after skipping whitespace().
	if (has_error()) {
		return pop_error();
	}

	_start = _current;
	start_line = line;
	start_column = column;
	leftmost_column = column;
	rightmost_column = column;

	if (pending_indents != 0) {
		// Adjust position for indent.
		_start -= start_column - 1;
		start_column = 1;
		leftmost_column = 1;
		if (pending_indents > 0) {
			// Indents.
			pending_indents--;
			return make_token(Token::INDENT);
		} else {
			// Dedents.
			pending_indents++;
			Token dedent = make_token(Token::DEDENT);
			dedent.end_column += 1;
			dedent.rightmost_column += 1;
			return dedent;
		}
	}

	if (_is_at_end()) {
		return make_token(Token::TK_EOF);
	}

	const CharType c = _advance();

	if (c == '\\') {
		// Line continuation with backslash.
		if (_peek() == '\r') {
			if (_peek(1) != '\n') {
				return make_error("Unexpected carriage return character.");
			}
			_advance();
		}
		if (_peek() != '\n') {
			return make_error("Expected new line after \"\\\".");
		}
		_advance();
		newline(false);
		line_continuation = true;
		return scan(); // Recurse to get next token.
	}

	line_continuation = false;

	if (_is_digit(c)) {
		return number();
	} else if (_is_alphanumeric(c)) {
		return potential_identifier();
	}

	switch (c) {
		// String literals.
		case '"':
		case '\'':
			return string();

		// Annotation.
		case '@':
			return annotation();

		// Single characters.
		case '~':
			return make_token(Token::TILDE);
		case ',':
			return make_token(Token::COMMA);
		case ':':
			return make_token(Token::COLON);
		case ';':
			return make_token(Token::SEMICOLON);
		case '$':
			return make_token(Token::DOLLAR);
		case '?':
			return make_token(Token::QUESTION_MARK);
		case '`':
			return make_token(Token::BACKTICK);

		// Parens.
		case '(':
			push_paren('(');
			return make_token(Token::PARENTHESIS_OPEN);
		case '[':
			push_paren('[');
			return make_token(Token::BRACKET_OPEN);
		case '{':
			push_paren('{');
			return make_token(Token::BRACE_OPEN);
		case ')':
			if (!pop_paren('(')) {
				return make_paren_error(c);
			}
			return make_token(Token::PARENTHESIS_CLOSE);
		case ']':
			if (!pop_paren('[')) {
				return make_paren_error(c);
			}
			return make_token(Token::BRACKET_CLOSE);
		case '}':
			if (!pop_paren('{')) {
				return make_paren_error(c);
			}
			return make_token(Token::BRACE_CLOSE);

		// Double characters.
		case '!':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::BANG_EQUAL);
			} else {
				return make_token(Token::BANG);
			}
		case '.':
			if (_peek() == '.') {
				_advance();
				return make_token(Token::PERIOD_PERIOD);
			} else if (_is_digit(_peek())) {
				// Number starting with '.'.
				return number();
			} else {
				return make_token(Token::PERIOD);
			}
		case '+':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::PLUS_EQUAL);
			} else {
				return make_token(Token::PLUS);
			}
		case '-':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::MINUS_EQUAL);
			} else if (_peek() == '>') {
				_advance();
				return make_token(Token::FORWARD_ARROW);
			} else {
				return make_token(Token::MINUS);
			}
		case '*':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::STAR_EQUAL);
			} else {
				return make_token(Token::STAR);
			}
		case '/':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::SLASH_EQUAL);
			} else {
				return make_token(Token::SLASH);
			}
		case '%':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::PERCENT_EQUAL);
			} else {
				return make_token(Token::PERCENT);
			}
		case '^':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::CARET_EQUAL);
			} else if (_peek() == '"' || _peek() == '\'') {
				// Node path
				return string();
			} else {
				return make_token(Token::CARET);
			}
		case '&':
			if (_peek() == '&') {
				_advance();
				return make_token(Token::AMPERSAND_AMPERSAND);
			} else if (_peek() == '=') {
				_advance();
				return make_token(Token::AMPERSAND_EQUAL);
			} else if (_peek() == '"' || _peek() == '\'') {
				// String Name
				return string();
			} else {
				return make_token(Token::AMPERSAND);
			}
		case '|':
			if (_peek() == '|') {
				_advance();
				return make_token(Token::PIPE_PIPE);
			} else if (_peek() == '=') {
				_advance();
				return make_token(Token::PIPE_EQUAL);
			} else {
				return make_token(Token::PIPE);
			}

		// Potential VCS conflict markers.
		case '=':
			if (_peek() == '=') {
				return check_vcs_marker('=', Token::EQUAL_EQUAL);
			} else {
				return make_token(Token::EQUAL);
			}
		case '<':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::LESS_EQUAL);
			} else if (_peek() == '<') {
				if (_peek(1) == '=') {
					_advance();
					_advance(); // Advance both '<' and '='
					return make_token(Token::LESS_LESS_EQUAL);
				} else {
					return check_vcs_marker('<', Token::LESS_LESS);
				}
			} else {
				return make_token(Token::LESS);
			}
		case '>':
			if (_peek() == '=') {
				_advance();
				return make_token(Token::GREATER_EQUAL);
			} else if (_peek() == '>') {
				if (_peek(1) == '=') {
					_advance();
					_advance(); // Advance both '>' and '='
					return make_token(Token::GREATER_GREATER_EQUAL);
				} else {
					return check_vcs_marker('>', Token::GREATER_GREATER);
				}
			} else {
				return make_token(Token::GREATER);
			}
		default:
			return make_error(vformat(R"(Unknown character "%s".)", String(&c, 1)));
	}
}

GDScriptTokenizer::GDScriptTokenizer() {
#ifdef TOOLS_ENABLED
	if (EditorSettings::get_singleton()) {
		tab_size = EditorSettings::get_singleton()->get_setting("text_editor/indent/size");
	}
#endif // TOOLS_ENABLED
}