{
    This file is part of the Free Pascal run time library.
    Copyright (c) 2003 by the Free Pascal development team.

    Processor dependent implementation for the system unit for
    ARM

    See the file COPYING.FPC, included in this distribution,
    for details about the copyright.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

 **********************************************************************}

{ IMPORTANT!
  Never use the "BLX label" instruction! Use "BL label" instead.
  The linker will always change BL to BLX if necessary, but not vice versa (this is linker version dependent).
  "BLX label" ALWAYS changes the instruction set: it switches a processor in ARM state to Thumb state,
  or a processor in Thumb state to ARM state.
}
{$ifndef FPC_SYSTEM_HAS_MOVE}
{$define FPC_SYSTEM_FPC_MOVE}
{$endif FPC_SYSTEM_HAS_MOVE}

{$ifdef FPC_SYSTEM_FPC_MOVE}
const
  cpu_has_edsp : boolean = false;
  in_edsp_test : boolean = false;
{$endif FPC_SYSTEM_FPC_MOVE}

{$if not(defined(wince)) and not(defined(gba)) and not(defined(nds)) and not(defined(FPUSOFT)) and not(defined(FPULIBGCC))}
{$define FPC_SYSTEM_HAS_SYSINITFPU}
{$if not defined(darwin) and not defined(FPUVFPV2) and not defined(FPUVFPV3) and not defined(FPUVFPV4) and not defined(FPUVFPV3_D16)}
Procedure SysInitFPU;{$ifdef SYSTEMINLINE}inline;{$endif}
begin
  { Enable FPU exceptions, but disable INEXACT, UNDERFLOW, DENORMAL }
  asm
    rfs r0
    and r0,r0,#0xffe0ffff
    orr r0,r0,#0x00070000
    wfs r0
  end;
end;
{$else}
Procedure SysInitFPU;{$ifdef SYSTEMINLINE}inline;{$endif}
begin
  { Enable FPU exceptions, but disable INEXACT, UNDERFLOW, DENORMAL }
  asm
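    // Rough map of the VFP FPSCR fields touched by the masks below (taken from
    // the ARM architecture reference manual, not stated in the original source):
    // bits [23:22] = rounding mode, bit 24 = flush-to-zero,
    // bits [12:8] = exception enable bits, bits [4:0] = cumulative exception flags.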
    fmrx r0,fpscr
    // set "round to nearest" mode
    and r0,r0,#0xff3fffff
    // mask "exception happened" and overflow flags
    and r0,r0,#0xffffff20
    // mask exception flags
    and r0,r0,#0xffff40ff
{$ifndef darwin}
    // Floating point exceptions cause kernel panics on iPhoneOS 2.2.1...
    // disable flush-to-zero mode (IEEE math compliant)
    and r0,r0,#0xfeffffff
    // enable invalid operation, div-by-zero and overflow exceptions
    orr r0,r0,#0x00000700
{$endif}
    fmxr fpscr,r0
  end;
end;
{$endif}
{$endif}

procedure fpc_cpuinit;
begin
  { don't let libraries influence the FPU cw set by the host program }
  if not IsLibrary then
    SysInitFPU;
end;

{$ifdef wince}
function _controlfp(new: DWORD; mask: DWORD): DWORD; cdecl; external 'coredll';

{$define FPC_SYSTEM_HAS_SYSRESETFPU}
Procedure SysResetFPU;{$ifdef SYSTEMINLINE}inline;{$endif}
begin
end;

{$define FPC_SYSTEM_HAS_SYSINITFPU}
Procedure SysInitFPU;{$ifdef SYSTEMINLINE}inline;{$endif}
begin
  { Enable FPU exceptions, but disable INEXACT, UNDERFLOW, DENORMAL }
  { FPU precision 64 bit, rounding to nearest, affine infinity }
  _controlfp($000C0003, $030F031F);
end;
{$endif wince}

{$ifdef linux}
function fpc_read_tp : pointer; [public, alias: 'fpc_read_tp'];assembler; nostackframe;
asm
  // Helper is located at 0xffff0fe0
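  // The two instructions below compute that address: mvn yields 0xffff0fff and
  // subtracting 0x1f gives 0xffff0fe0, the Linux kuser_get_tls user helper,
  // which returns the TLS pointer in r0 (assumption: standard ARM Linux
  // kernel user helper page).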
  mvn r0,#0x0000f000   // mov r0, #0xffff0fff
  sub pc,r0,#0x1f      // Jump to helper
end;
{$endif linux}

{****************************************************************************
                         stack frame related stuff
****************************************************************************}

{$IFNDEF INTERNAL_BACKTRACE}
{$define FPC_SYSTEM_HAS_GET_FRAME}
function get_frame:pointer;assembler;nostackframe;
asm
{$ifndef darwin}
  mov r0,r11
{$else}
  mov r0,r7
{$endif}
end;
{$ENDIF not INTERNAL_BACKTRACE}

{$define FPC_SYSTEM_HAS_GET_CALLER_ADDR}
function get_caller_addr(framebp:pointer;addr:pointer=nil):pointer;assembler;nostackframe;
asm
  cmp r0,#0
{$ifndef darwin}
  ldrne r0,[r0,#-4]
{$else}
  ldrne r0,[r0,#4]
{$endif}
end;

{$define FPC_SYSTEM_HAS_GET_CALLER_FRAME}
function get_caller_frame(framebp:pointer;addr:pointer=nil):pointer;assembler;nostackframe;
asm
  cmp r0,#0
{$ifndef darwin}
  ldrne r0,[r0,#-12]
{$else}
  ldrne r0,[r0]
{$endif}
end;
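{ Frame layout assumed by the two helpers above (inferred from the offsets,
  not documented in the original source): with FPC's default ARM frames the
  saved return address sits at [fp-4] and the caller's frame pointer at
  [fp-12]; the darwin/APCS-style r7 frame keeps the previous frame pointer at
  [r7] and the return address at [r7+4]. }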
{$define FPC_SYSTEM_HAS_SPTR}
Function Sptr : pointer;assembler;nostackframe;
asm
  mov r0,sp
end;

{$ifndef FPC_SYSTEM_HAS_FILLCHAR}
{$define FPC_SYSTEM_HAS_FILLCHAR}
Procedure FillChar(var x;count:longint;value:byte);assembler;nostackframe;
asm
  // less than 0?
  cmp r1,#0
{$ifdef CPUARM_HAS_BX}
  bxle lr
{$else}
  movle pc,lr
{$endif}
  mov r3,r0
  orr r2,r2,r2,lsl #8
  orr r2,r2,r2,lsl #16
  tst r3, #3   // Aligned?
  bne .LFillchar_do_align
.LFillchar_is_aligned:
  subs r1,r1,#8
  bmi .LFillchar_less_than_8bytes
  mov ip,r2
.LFillchar_at_least_8bytes:
  // Do 16 bytes per loop
  // More unrolling is unnecessary, as we'll just stall on the write buffers
  stmia r3!,{r2,ip}
  subs r1,r1,#8
  stmplia r3!,{r2,ip}
  subpls r1,r1,#8
  bpl .LFillchar_at_least_8bytes
.LFillchar_less_than_8bytes:
  // Do the rest
  adds r1, r1, #8
{$ifdef CPUARM_HAS_BX}
  bxeq lr
{$else}
  moveq pc,lr
{$endif}
  tst r1, #4
  strne r2,[r3],#4
{$ifdef CPUARM_HAS_ALL_MEM}
  tst r1, #2
  strneh r2,[r3],#2
{$else CPUARM_HAS_ALL_MEM}
  tst r1, #2
  strneb r2,[r3],#1
  strneb r2,[r3],#1
{$endif CPUARM_HAS_ALL_MEM}
  tst r1, #1
  strneb r2,[r3],#1
{$ifdef CPUARM_HAS_BX}
  bx lr
{$else}
  mov pc,lr
{$endif}
  // Special case for unaligned start
  // We make a maximum of 3 loops here
.LFillchar_do_align:
  strb r2,[r3],#1
  subs r1, r1, #1
{$ifdef CPUARM_HAS_BX}
  bxeq lr
{$else}
  moveq pc,lr
{$endif}
  tst r3,#3
  bne .LFillchar_do_align
  b .LFillchar_is_aligned
end;
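{ Non-normative sketch (added for clarity) of the semantics the assembler
  routine above implements; the real code additionally aligns the destination
  and stores whole words:

  Procedure FillCharRef(var x; count: longint; value: byte);
  var
    p: pbyte;
    i: longint;
  begin
    if count <= 0 then
      exit;
    p := pbyte(@x);
    for i := 1 to count do
      begin
        p^ := value;
        inc(p);
      end;
  end;
}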
{$endif FPC_SYSTEM_HAS_FILLCHAR}

{$ifndef FPC_SYSTEM_HAS_MOVE}
{$define FPC_SYSTEM_HAS_MOVE}

{$ifdef CPUARM_HAS_EDSP}
procedure Move(const source;var dest;count:longint);[public, alias: 'FPC_MOVE'];assembler;nostackframe;
{$else CPUARM_HAS_EDSP}
procedure Move_pld(const source;var dest;count:longint);assembler;nostackframe;
{$endif CPUARM_HAS_EDSP}
asm
  // pld [r0]
  // encode this using .long so the RTL also assembles with instruction sets not supporting pld
  .long 0xf5d0f000
  // count <=0 ?
  cmp r2,#0
{$ifdef CPUARM_HAS_BX}
  bxle lr
{$else}
  movle pc,lr
{$endif}
  // overlap?
  subs r3, r1, r0   // if (dest > source) and
  cmphi r2, r3      //    (count > dest - src) then
  bhi .Loverlapped  //      DoReverseByteCopy;
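  // In other words (not spelled out above): subs computes dest-source; if
  // dest <= source the HI condition fails, the cmphi is skipped and the flags
  // stay non-HI, so the forward copy is used. Only when dest > source and
  // count > dest-source do we take the reverse byte copy.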
  cmp r2,#8         // if (count < 8) then
  blt .Lbyteloop    //   DoForwardByteCopy;
  // Any way to avoid the above jump and fuse the next two instructions?
  tst r0, #3        // if (source and 3) <> 0 or
  tsteq r1, #3      //    (dest and 3) <> 0 then
  bne .Lbyteloop    //      DoForwardByteCopy;
  // pld [r0,#32]
  // encode this using .long so the RTL also assembles with instruction sets not supporting pld
  .long 0xf5d0f020
.Ldwordloop:
  ldmia r0!, {r3, ip}
  // preload
  // pld [r0,#64]
  // encode this using .long so the RTL also assembles with instruction sets not supporting pld
  .long 0xf5d0f040
  sub r2,r2,#8
  cmp r2, #8
  stmia r1!, {r3, ip}
  bge .Ldwordloop
  cmp r2,#0
{$ifdef CPUARM_HAS_BX}
  bxeq lr
{$else}
  moveq pc,lr
{$endif}
.Lbyteloop:
  subs r2,r2,#1
  ldrb r3,[r0],#1
  strb r3,[r1],#1
  bne .Lbyteloop
{$ifdef CPUARM_HAS_BX}
  bx lr
{$else}
  mov pc,lr
{$endif}
.Loverlapped:
  subs r2,r2,#1
  ldrb r3,[r0,r2]
  strb r3,[r1,r2]
  bne .Loverlapped
end;
{$ifndef CPUARM_HAS_EDSP}
procedure Move_blended(const source;var dest;count:longint);assembler;nostackframe;
asm
  // count <=0 ?
  cmp r2,#0
{$ifdef CPUARM_HAS_BX}
  bxle lr
{$else}
  movle pc,lr
{$endif}
  // overlap?
  subs r3, r1, r0   // if (dest > source) and
  cmphi r2, r3      //    (count > dest - src) then
  bhi .Loverlapped  //      DoReverseByteCopy;
  cmp r2,#8         // if (count < 8) then
  blt .Lbyteloop    //   DoForwardByteCopy;
  // Any way to avoid the above jump and fuse the next two instructions?
  tst r0, #3        // if (source and 3) <> 0 or
  tsteq r1, #3      //    (dest and 3) <> 0 then
  bne .Lbyteloop    //      DoForwardByteCopy;
.Ldwordloop:
  ldmia r0!, {r3, ip}
  sub r2,r2,#8
  cmp r2, #8
  stmia r1!, {r3, ip}
  bge .Ldwordloop
  cmp r2,#0
{$ifdef CPUARM_HAS_BX}
  bxeq lr
{$else}
  moveq pc,lr
{$endif}
.Lbyteloop:
  subs r2,r2,#1
  ldrb r3,[r0],#1
  strb r3,[r1],#1
  bne .Lbyteloop
{$ifdef CPUARM_HAS_BX}
  bx lr
{$else}
  mov pc,lr
{$endif}
.Loverlapped:
  subs r2,r2,#1
  ldrb r3,[r0,r2]
  strb r3,[r1,r2]
  bne .Loverlapped
end;

const
  moveproc : procedure(const source;var dest;count:longint) = @move_blended;

procedure Move(const source;var dest;count:longint);[public, alias: 'FPC_MOVE']; {$ifndef FPC_PIC} assembler;nostackframe; {$endif FPC_PIC}
{$ifdef FPC_PIC}
begin
  moveproc(source,dest,count);
end;
{$else FPC_PIC}
asm
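  // Dispatch note (behaviour of the two loads below): fetch the address of the
  // moveproc variable from the literal pool, then load pc from it, so FPC_MOVE
  // tail-jumps to whichever implementation fpc_cpucodeinit selected
  // (move_blended or move_pld).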
  ldr ip,.Lmoveproc
  ldr pc,[ip]
.Lmoveproc:
  .long moveproc
end;
{$endif FPC_PIC}
{$endif CPUARM_HAS_EDSP}
{$endif FPC_SYSTEM_HAS_MOVE}

{****************************************************************************
                                  String
****************************************************************************}
{$ifndef FPC_SYSTEM_HAS_FPC_SHORTSTR_ASSIGN}
{$define FPC_SYSTEM_HAS_FPC_SHORTSTR_ASSIGN}
procedure fpc_shortstr_to_shortstr(out res:shortstring;const sstr:shortstring);assembler;nostackframe;[public,alias: 'FPC_SHORTSTR_TO_SHORTSTR'];compilerproc;
{r0: __RESULT
 r1: len
 r2: sstr}
asm
  ldrb r12,[r2],#1
  cmp r12,r1
  movgt r12,r1
  strb r12,[r0],#1
  cmp r12,#6   (* 6 seems to be the break-even point. *)
  blt .LStartTailCopy
  (* Align the destination on 32 bits. This is the only place where unrolling
     really seems to help, since in the common case sstr is aligned on
     32 bits, so in the common case we need to copy 3 bytes to align,
     i.e. with a loop you wouldn't branch out early. *)
  rsb r3,r0,#0
  ands r3,r3,#3
  sub r12,r12,r3
  ldrneb r1,[r2],#1
  strneb r1,[r0],#1
  subnes r3,r3,#1
  ldrneb r1,[r2],#1
  strneb r1,[r0],#1
  subnes r3,r3,#1
  ldrneb r1,[r2],#1
  strneb r1,[r0],#1
  subnes r3,r3,#1
.LDoneAlign:
  (* The destination should be aligned now, but the source might not be;
     if that is the case, do a byte-per-byte copy. *)
  tst r2,#3
  bne .LStartTailCopy
  (* Start the main copy, 32 bits at a time. *)
  movs r3,r12,lsr #2
  and r12,r12,#3
  beq .LStartTailCopy
.LNext4bytes:
  (* Unrolling this loop would save a little bit of time for long strings
     (>20 chars), but alas, it hurts for short strings and they are the
     common case. *)
  ldrne r1,[r2],#4
  strne r1,[r0],#4
  subnes r3,r3,#1
  bne .LNext4bytes
.LStartTailCopy:
  (* Do the remaining bytes. *)
  cmp r12,#0
  beq .LDoneTail
.LNextChar3:
  ldrb r1,[r2],#1
  strb r1,[r0],#1
  subs r12,r12,#1
  bne .LNextChar3
.LDoneTail:
end;
procedure fpc_shortstr_assign(len:longint;sstr,dstr:pointer);assembler;nostackframe;[public,alias:'FPC_SHORTSTR_ASSIGN'];compilerproc;
{r0: len
 r1: sstr
 r2: dstr}
asm
  ldrb r12,[r1],#1
  cmp r12,r0
  movgt r12,r0
  strb r12,[r2],#1
  cmp r12,#6   (* 6 seems to be the break-even point. *)
  blt .LStartTailCopy
  (* Align the destination on 32 bits. This is the only place where unrolling
     really seems to help, since in the common case sstr is aligned on
     32 bits, so in the common case we need to copy 3 bytes to align,
     i.e. with a loop you wouldn't branch out early. *)
  rsb r3,r2,#0
  ands r3,r3,#3
  sub r12,r12,r3
  ldrneb r0,[r1],#1
  strneb r0,[r2],#1
  subnes r3,r3,#1
  ldrneb r0,[r1],#1
  strneb r0,[r2],#1
  subnes r3,r3,#1
  ldrneb r0,[r1],#1
  strneb r0,[r2],#1
  subnes r3,r3,#1
.LDoneAlign:
  (* The destination should be aligned now, but the source might not be;
     if that is the case, do a byte-per-byte copy. *)
  tst r1,#3
  bne .LStartTailCopy
  (* Start the main copy, 32 bits at a time. *)
  movs r3,r12,lsr #2
  and r12,r12,#3
  beq .LStartTailCopy
.LNext4bytes:
  (* Unrolling this loop would save a little bit of time for long strings
     (>20 chars), but alas, it hurts for short strings and they are the
     common case. *)
  ldrne r0,[r1],#4
  strne r0,[r2],#4
  subnes r3,r3,#1
  bne .LNext4bytes
.LStartTailCopy:
  (* Do the remaining bytes. *)
  cmp r12,#0
  beq .LDoneTail
.LNextChar3:
  ldrb r0,[r1],#1
  strb r0,[r2],#1
  subs r12,r12,#1
  bne .LNextChar3
.LDoneTail:
end;
{$endif FPC_SYSTEM_HAS_FPC_SHORTSTR_ASSIGN}
{$ifndef FPC_SYSTEM_HAS_FPC_PCHAR_LENGTH}
{$define FPC_SYSTEM_HAS_FPC_PCHAR_LENGTH}
function fpc_Pchar_length(p:Pchar):sizeint;assembler;nostackframe;[public,alias:'FPC_PCHAR_LENGTH'];compilerproc;
asm
  cmp r0,#0
  mov r1,r0
  beq .Ldone
.Lnextchar:
  (* Are we aligned? *)
  tst r1,#3
  bne .Ltest_unaligned   (* No, do byte per byte. *)
  ldr r3,.L01010101
.Ltest_aligned:
  (* Aligned, load 4 bytes at a time. *)
  ldr r12,[r1],#4
  (* Check whether r12 contains a 0 byte. *)
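  (* Explanation added (assuming r3 = $01010101): the next four instructions
     are the classic "contains a zero byte" test; (x - $01010101) and (not x)
     and $80808080 is non-zero exactly when some byte of x is zero. *)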
  sub r2,r12,r3
  mvn r12,r12
  and r2,r2,r12
  ands r2,r2,r3,lsl #7   (* r3 lsl 7 = $80808080 *)
  beq .Ltest_aligned     (* No 0 byte, repeat. *)
  sub r1,r1,#4
.Ltest_unaligned:
  ldrb r12,[r1],#1
  cmp r12,#1   (* r12<1 is the same as r12=0, but puts the result in the carry flag *)
  bcs .Lnextchar
  (* Dirty trick: we need to subtract 1 extra because we have also counted the
     terminating 0; since the carry flag is known to be clear here, sbc does
     that for us. *)
  sbc r0,r1,r0
.Ldone:
{$ifdef CPUARM_HAS_BX}
  bx lr
{$else}
  mov pc,lr
{$endif}
.L01010101:
  .long 0x01010101
end;
{$endif}
{$ifndef darwin}
{$define FPC_SYSTEM_HAS_ANSISTR_DECR_REF}
Procedure fpc_ansistr_decr_ref (Var S : Pointer); [Public,Alias:'FPC_ANSISTR_DECR_REF'];assembler;nostackframe; compilerproc;
asm
  ldr r1, [r0]
  // On return the pointer will always be set to zero, so utilize the delay slots
  mov r2, #0
  str r2, [r0]
  // Check for a zero string
  cmp r1, #0
  // Load reference counter
  ldrne r2, [r1, #-8]
{$ifdef CPUARM_HAS_BX}
  bxeq lr
{$else}
  moveq pc,lr
{$endif}
  // Check for a constant string
  cmp r2, #0
{$ifdef CPUARM_HAS_BX}
  bxlt lr
{$else}
  movlt pc,lr
{$endif}
  stmfd sp!, {r1, lr}
  sub r0, r1, #8
  bl InterLockedDecrement
  // InterLockedDecrement is a nice guy and sets the z flag for us
  // if the reference count dropped to 0
  ldmnefd sp!, {r1, pc}
  ldmfd sp!, {r0, lr}
  // We currently cannot use constant symbols in ARM assembly,
  // but we need to stay backward compatible with 2.6
  sub r0, r0, #12
  // Jump without a link, so freemem directly returns to our caller
  b FPC_FREEMEM
end;
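{ Ansistring layout assumed by the offsets above (kept compatible with FPC 2.6,
  as the code itself notes): the reference count lives 8 bytes before the
  string data and the start of the heap block 12 bytes before it. }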
{$define FPC_SYSTEM_HAS_ANSISTR_INCR_REF}
Procedure fpc_ansistr_incr_ref (S : Pointer); [Public,Alias:'FPC_ANSISTR_INCR_REF'];assembler;nostackframe; compilerproc;
asm
  // Null string?
  cmp r0, #0
  // Load reference counter
  ldrne r1, [r0, #-8]
  // pointer to counter, calculate here for delay slot utilization
  subne r0, r0, #8
{$ifdef CPUARM_HAS_BX}
  bxeq lr
{$else}
  moveq pc,lr
{$endif}
  // Check for a constant string
  cmp r1, #0
  // Tail call
  // Hopefully the linker will place InterLockedIncrement as laid out here
  bge InterLockedIncrement
  // Free Pascal will generate a proper return here, saving some cache space
end;
{$endif not darwin}

// --- InterLocked functions begin

{$if not defined(CPUARM_HAS_LDREX) and not defined(SYSTEM_HAS_KUSER_CMPXCHG) }
// Use generic interlock implementation
var
  fpc_system_lock: longint;

{$ifdef FPC_PIC}
// Use generic interlock implementation with PIC
// A helper function to get a pointer to fpc_system_lock in a PIC-compatible way.
function get_fpc_system_lock_ptr: pointer;
begin
  get_fpc_system_lock_ptr:=@fpc_system_lock;
end;
{$endif FPC_PIC}
{$endif}
function InterLockedDecrement (var Target: longint) : longint; assembler; nostackframe;
asm
{$ifdef CPUARM_HAS_LDREX}
.Lloop:
  ldrex r1, [r0]
  sub r1, r1, #1
  strex r2, r1, [r0]
  cmp r2, #0
  bne .Lloop
  movs r0, r1
  bx lr
{$else}
{$ifdef SYSTEM_HAS_KUSER_CMPXCHG}
  stmfd r13!, {lr}
  mov r2, r0   // kuser_cmpxchg does not clobber r2 by definition
.Latomic_dec_loop:
  ldr r0, [r2]   // Load the current value
  // We expect this to work without looping most of the time
  // R3 gets clobbered in kuser_cmpxchg, so in the unlikely case that we have to
  // loop here again, we have to reload the value. Normally this just fills the
  // load stall-cycles from the above ldr, so in reality we won't get any additional
  // delays because of this
  // Don't use ldr to load r3, to avoid cache line thrashing
  // Load 0xffff0fff into r3 and subtract to get 0xffff0fc0,
  // the kuser_cmpxchg entry point
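  // kuser_cmpxchg ABI, per the Linux kernel user-helper documentation (not
  // restated in this file): r0 = expected old value, r1 = new value,
  // r2 = pointer; on success it returns 0 and sets the C flag.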
  mvn r3, #0x0000f000
  sub r3, r3, #0x3F
  sub r1, r0, #1   // Decrement value
{$ifdef CPUARM_HAS_BLX}
  blx r3   // Call kuser_cmpxchg, sets C flag on success
{$else}
  mov lr, pc
{$ifdef CPUARM_HAS_BX}
  bx r3
{$else}
  mov pc, r3
{$endif}
{$endif}
  // MOVS sets the Z flag when the result reaches zero, this can be used later on
  // The C flag will not be modified by this because we're not doing any shifting
  movcss r0, r1   // We expect that to work most of the time so keep it pipeline friendly
  ldmcsfd r13!, {pc}
  b .Latomic_dec_loop   // kuser_cmpxchg failed (C flag clear), loop back
{$else}
  // lock
{$ifdef FPC_PIC}
  push {r0,lr}
  bl get_fpc_system_lock_ptr
  mov r3,r0
  pop {r0,lr}
{$else FPC_PIC}
  ldr r3, .Lfpc_system_lock
{$endif FPC_PIC}
  mov r1, #1
.Lloop:
  swp r2, r1, [r3]
  cmp r2, #0
  bne .Lloop
  // do the job
  ldr r1, [r0]
  sub r1, r1, #1
  str r1, [r0]
  movs r0, r1
  // unlock and return
  str r2, [r3]
{$ifdef CPUARM_HAS_BX}
  bx lr
{$else}
  mov pc,lr
{$endif}
{$ifndef FPC_PIC}
.Lfpc_system_lock:
  .long fpc_system_lock
{$endif FPC_PIC}
{$endif}
{$endif}
end;
function InterLockedIncrement (var Target: longint) : longint; assembler; nostackframe;
asm
{$ifdef CPUARM_HAS_LDREX}
.Lloop:
  ldrex r1, [r0]
  add r1, r1, #1
  strex r2, r1, [r0]
  cmp r2, #0
  bne .Lloop
  mov r0, r1
  bx lr
{$else}
{$ifdef SYSTEM_HAS_KUSER_CMPXCHG}
  stmfd r13!, {lr}
  mov r2, r0   // kuser_cmpxchg does not clobber r2 by definition
.Latomic_inc_loop:
  ldr r0, [r2]   // Load the current value
  // We expect this to work without looping most of the time
  // R3 gets clobbered in kuser_cmpxchg, so in the unlikely case that we have to
  // loop here again, we have to reload the value. Normally this just fills the
  // load stall-cycles from the above ldr, so in reality we won't get any additional
  // delays because of this
  // Don't use ldr to load r3, to avoid cache line thrashing
  // Load 0xffff0fff into r3 and subtract to get 0xffff0fc0,
  // the kuser_cmpxchg entry point
  mvn r3, #0x0000f000
  sub r3, r3, #0x3F
  add r1, r0, #1   // Increment value
{$ifdef CPUARM_HAS_BLX}
  blx r3   // Call kuser_cmpxchg, sets C flag on success
{$else}
  mov lr, pc
{$ifdef CPUARM_HAS_BX}
  bx r3
{$else}
  mov pc, r3
{$endif}
{$endif}
  movcs r0, r1   // We expect that to work most of the time so keep it pipeline friendly
  ldmcsfd r13!, {pc}
  b .Latomic_inc_loop   // kuser_cmpxchg failed (C flag clear), loop back
{$else}
  // lock
{$ifdef FPC_PIC}
  push {r0,lr}
  bl get_fpc_system_lock_ptr
  mov r3,r0
  pop {r0,lr}
{$else FPC_PIC}
  ldr r3, .Lfpc_system_lock
{$endif FPC_PIC}
  mov r1, #1
.Lloop:
  swp r2, r1, [r3]
  cmp r2, #0
  bne .Lloop
  // do the job
  ldr r1, [r0]
  add r1, r1, #1
  str r1, [r0]
  mov r0, r1
  // unlock and return
  str r2, [r3]
{$ifdef CPUARM_HAS_BX}
  bx lr
{$else}
  mov pc,lr
{$endif}
{$ifndef FPC_PIC}
.Lfpc_system_lock:
  .long fpc_system_lock
{$endif FPC_PIC}
{$endif}
{$endif}
end;
function InterLockedExchange (var Target: longint;Source : longint) : longint; assembler; nostackframe;
asm
{$ifdef CPUARM_HAS_LDREX}
  // swp is deprecated on ARMv6 and above
.Lloop:
  ldrex r2, [r0]
  strex r3, r1, [r0]
  cmp r3, #0
  bne .Lloop
  mov r0, r2
  bx lr
{$else}
{$ifdef SYSTEM_HAS_KUSER_CMPXCHG}
  stmfd r13!, {r4, lr}
  mov r2, r0   // kuser_cmpxchg does not clobber r2 (and r1) by definition
.Latomic_add_loop:
  ldr r0, [r2]   // Load the current value
  // We expect this to work without looping most of the time
  // R3 gets clobbered in kuser_cmpxchg, so in the unlikely case that we have to
  // loop here again, we have to reload the value. Normally this just fills the
  // load stall-cycles from the above ldr, so in reality we won't get any additional
  // delays because of this
  // Don't use ldr to load r3, to avoid cache line thrashing
  // Load 0xffff0fff into r3 and subtract to get 0xffff0fc0,
  // the kuser_cmpxchg entry point
  mvn r3, #0x0000f000
  sub r3, r3, #0x3F
  mov r4, r0   // save the current value because kuser_cmpxchg clobbers r0
{$ifdef CPUARM_HAS_BLX}
  blx r3   // Call kuser_cmpxchg, sets C flag on success
{$else}
  mov lr, pc
{$ifdef CPUARM_HAS_BX}
  bx r3
{$else}
  mov pc, r3
{$endif}
{$endif}
  // restore the original value if needed
  movcs r0, r4
  ldmcsfd r13!, {r4, pc}
  b .Latomic_add_loop   // kuser_cmpxchg failed, loop back
{$else}
  // lock
{$ifdef FPC_PIC}
  push {r0,r1,lr}
  bl get_fpc_system_lock_ptr
  mov r3,r0
  pop {r0,r1,lr}
{$else FPC_PIC}
  ldr r3, .Lfpc_system_lock
{$endif FPC_PIC}
  mov r2, #1
.Lloop:
  swp r2, r2, [r3]
  cmp r2, #0
  bne .Lloop
  // do the job
  ldr r2, [r0]
  str r1, [r0]
  mov r0, r2
  // unlock and return
  mov r2, #0
  str r2, [r3]
{$ifdef CPUARM_HAS_BX}
  bx lr
{$else}
  mov pc,lr
{$endif}
{$ifndef FPC_PIC}
.Lfpc_system_lock:
  .long fpc_system_lock
{$endif FPC_PIC}
{$endif}
{$endif}
end;
function InterLockedExchangeAdd (var Target: longint;Source : longint) : longint; assembler; nostackframe;
asm
{$ifdef CPUARM_HAS_LDREX}
.Lloop:
  ldrex r2, [r0]
  add r12, r1, r2
  strex r3, r12, [r0]
  cmp r3, #0
  bne .Lloop
  mov r0, r2
  bx lr
{$else}
{$ifdef SYSTEM_HAS_KUSER_CMPXCHG}
  stmfd r13!, {r4, lr}
  mov r2, r0   // kuser_cmpxchg does not clobber r2 by definition
  mov r4, r1   // Save addend
.Latomic_add_loop:
  ldr r0, [r2]   // Load the current value
  // We expect this to work without looping most of the time
  // R3 gets clobbered in kuser_cmpxchg, so in the unlikely case that we have to
  // loop here again, we have to reload the value. Normally this just fills the
  // load stall-cycles from the above ldr, so in reality we won't get any additional
  // delays because of this
  // Don't use ldr to load r3, to avoid cache line thrashing
  // Load 0xffff0fff into r3 and subtract to get 0xffff0fc0,
  // the kuser_cmpxchg entry point
  mvn r3, #0x0000f000
  sub r3, r3, #0x3F
  add r1, r0, r4   // Add to value
{$ifdef CPUARM_HAS_BLX}
  blx r3   // Call kuser_cmpxchg, sets C flag on success
{$else}
  mov lr, pc
{$ifdef CPUARM_HAS_BX}
  bx r3
{$else}
  mov pc, r3
{$endif}
{$endif}
  // r1 does not get clobbered, so just get back the original value
  // Otherwise we would have to allocate one more register and store the
  // temporary value
  subcs r0, r1, r4
  ldmcsfd r13!, {r4, pc}
  b .Latomic_add_loop   // kuser_cmpxchg failed, loop back
{$else}
  // lock
{$ifdef FPC_PIC}
  push {r0,r1,lr}
  bl get_fpc_system_lock_ptr
  mov r3,r0
  pop {r0,r1,lr}
{$else FPC_PIC}
  ldr r3, .Lfpc_system_lock
{$endif FPC_PIC}
  mov r2, #1
.Lloop:
  swp r2, r2, [r3]
  cmp r2, #0
  bne .Lloop
  // do the job
  ldr r2, [r0]
  add r1, r1, r2
  str r1, [r0]
  mov r0, r2
  // unlock and return
  mov r2, #0
  str r2, [r3]
{$ifdef CPUARM_HAS_BX}
  bx lr
{$else}
  mov pc,lr
{$endif}
{$ifndef FPC_PIC}
.Lfpc_system_lock:
  .long fpc_system_lock
{$endif FPC_PIC}
{$endif}
{$endif}
end;
function InterlockedCompareExchange(var Target: longint; NewValue: longint; Comperand: longint): longint; assembler; nostackframe;
asm
{$ifdef CPUARM_HAS_LDREX}
.Lloop:
  ldrex r3, [r0]
  mov r12, #0
  cmp r3, r2
  strexeq r12, r1, [r0]
  cmp r12, #0
  bne .Lloop
  mov r0, r3
  bx lr
{$else}
{$ifdef SYSTEM_HAS_KUSER_CMPXCHG}
  stmfd r13!, {r4, lr}
  mov r4, r2   // Swap parameters around
  mov r2, r0
  mov r0, r4   // Keep the Comperand in r4 because we'll need it again later
  // r1 and r2 will not be clobbered by kuser_cmpxchg
  // If we have to loop, r0 will be set to the original Comperand
  // kuser_cmpxchg is documented to destroy r3, therefore setting
  // r3 must be inside the loop
.Linterlocked_compare_exchange_loop:
  mvn r3, #0x0000f000
  sub r3, r3, #0x3F
{$ifdef CPUARM_HAS_BLX}
  blx r3   // Call kuser_cmpxchg, sets C flag on success
{$else}
  mov lr, pc
{$ifdef CPUARM_HAS_BX}
  bx r3
{$else}
  mov pc, r3
{$endif}
{$endif}
  movcs r0, r4   // Return the previous value on success
  ldmcsfd r13!, {r4, pc}
  // The error case is a bit tricky: kuser_cmpxchg does not return the current value,
  // so we may need to loop to avoid race conditions
  // The loop case is HIGHLY unlikely; it would require that we got rescheduled between
  // calling kuser_cmpxchg and the ldr, and that while we were rescheduled another
  // process/thread set the value to our Comperand
  ldr r0, [r2]   // Load the currently set value
  cmp r0, r4     // Return if Comperand != current value, otherwise loop again
  ldmnefd r13!, {r4, pc}
  // If we get here, we have to loop
  b .Linterlocked_compare_exchange_loop
{$else}
  // lock
{$ifdef FPC_PIC}
  push {r0,r1,r2,lr}
  bl get_fpc_system_lock_ptr
  mov r12,r0
  pop {r0,r1,r2,lr}
{$else FPC_PIC}
  ldr r12, .Lfpc_system_lock
{$endif FPC_PIC}
  mov r3, #1
.Lloop:
  swp r3, r3, [r12]
  cmp r3, #0
  bne .Lloop
  // do the job
  ldr r3, [r0]
  cmp r3, r2
  streq r1, [r0]
  mov r0, r3
  // unlock and return
  mov r3, #0
  str r3, [r12]
{$ifdef CPUARM_HAS_BX}
  bx lr
{$else}
  mov pc,lr
{$endif}
{$ifndef FPC_PIC}
.Lfpc_system_lock:
  .long fpc_system_lock
{$endif FPC_PIC}
{$endif}
{$endif}
end;
{$define FPC_SYSTEM_HAS_DECLOCKED_LONGINT}
function declocked(var l: longint) : boolean; inline;
begin
  Result:=InterLockedDecrement(l) = 0;
end;

{$define FPC_SYSTEM_HAS_INCLOCKED_LONGINT}
procedure inclocked(var l: longint); inline;
begin
  InterLockedIncrement(l);
end;

// --- InterLocked functions end

procedure fpc_cpucodeinit;
begin
{$ifdef FPC_SYSTEM_FPC_MOVE}
{$ifndef CPUARM_HAS_EDSP}
  cpu_has_edsp:=true;
  in_edsp_test:=true;
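  { Probe for EDSP by executing an ldrd on an 8-byte aligned stack address.
    On CPUs without EDSP this traps; the run time's illegal instruction
    handling is then expected to clear cpu_has_edsp while in_edsp_test is set
    (assumption based on the flags used here). }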
  asm
    bic r0,sp,#7
    // ldrd r0,r1,[r0]
    // encode this using .long so the RTL also assembles with instruction sets not supporting ldrd
    .long 0xe1c000d0
  end;
  in_edsp_test:=false;
  if cpu_has_edsp then
    moveproc:=@move_pld
  else
    moveproc:=@move_blended;
{$else CPUARM_HAS_EDSP}
  cpu_has_edsp:=true;
{$endif CPUARM_HAS_EDSP}
{$endif FPC_SYSTEM_FPC_MOVE}
end;
{$define FPC_SYSTEM_HAS_SWAPENDIAN}

{ SwapEndian(<16 Bit>) being inlined is faster than using assembler }
function SwapEndian(const AValue: SmallInt): SmallInt;{$ifdef SYSTEMINLINE}inline;{$endif}
begin
  { the extra Word type cast is necessary because the "AValue shr 8" }
  { is turned into "longint(AValue) shr 8", so if AValue < 0 then    }
  { the sign bits from the upper 16 bits are shifted in rather than  }
  { zeroes.                                                          }
  Result := SmallInt((Word(AValue) shr 8) or (Word(AValue) shl 8));
end;

function SwapEndian(const AValue: Word): Word;{$ifdef SYSTEMINLINE}inline;{$endif}
begin
  Result := Word((AValue shr 8) or (AValue shl 8));
end;

(*
This is kept for reference. That's what the compiler COULD generate in these cases.
But FPC currently does not support inlining of asm functions, so the whole call
overhead is bigger than the gain of the optimized function.

function AsmSwapEndian(const AValue: SmallInt): SmallInt;{$ifdef SYSTEMINLINE}inline;{$endif};assembler;nostackframe;
asm
  // We're starting with 4321
{$if defined(CPUARM_HAS_REV)}
  rev r0, r0              // Reverse byte order, r0 = 1234
  mov r0, r0, lsr #16     // Shift down to 16 bits, r0 = 0012
{$else}
  mov r0, r0, lsl #16     // Shift to make that 2100
  mov r0, r0, ror #24     // Rotate to 1002
  orr r0, r0, r0, lsr #16 // Shift and combine into 0012
{$endif}
end;
*)

{
  This used to be an assembler function, but with newer improvements to the compiler
  it generates a perfect 4-cycle code sequence and can be inlined.
}
function SwapEndian(const AValue: LongWord): LongWord;{$ifdef SYSTEMINLINE}inline;{$endif}
var
  Temp: LongWord;
begin
  Temp := AValue xor rordword(AValue,16);
  Temp := Temp and $FF00FFFF;
  Result:= (Temp shr 8) xor rordword(AValue,8);
end;
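{ Worked example of the xor/ror trick above (added for clarity), with
  AValue = $11223344:
    RorDWord(AValue,16)       = $33441122
    Temp := AValue xor that   = $22662266
    Temp and $FF00FFFF        = $22002266
    Temp shr 8                = $00220022
    RorDWord(AValue,8)        = $44112233
    xor                       = $44332211, i.e. the bytes reversed. }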
function SwapEndian(const AValue: LongInt): LongInt;{$ifdef SYSTEMINLINE}inline;{$endif}
begin
  Result:=LongInt(SwapEndian(DWord(AValue)));
end;

{
  Currently Free Pascal will not generate a good assembler sequence for
    Result:=(SwapEndian(longword(lo(AValue))) shl 32) or
            (SwapEndian(longword(hi(AValue))));
  so we keep an assembly version for now.
}
function SwapEndian(const AValue: Int64): Int64; assembler; nostackframe;
asm
  // fpc >2.6.0 adds the "rev" instruction in the internal assembler
{$if defined(CPUARM_HAS_REV)}
  rev r2, r0
  rev r0, r1
  mov r1, r2
{$else}
  mov ip, r1
  // We're starting with r0 = $87654321
  eor r1, r0, r0, ror #16   // r1 = $C444C444
  bic r1, r1, #16711680     // r1 = r1 and $ff00ffff = $C400C444
  mov r0, r0, ror #8        // r0 = $21876543
  eor r1, r0, r1, lsr #8    // r1 = $21436587
  eor r0, ip, ip, ror #16
  bic r0, r0, #16711680
  mov ip, ip, ror #8
  eor r0, ip, r0, lsr #8
{$endif}
end;

function SwapEndian(const AValue: QWord): QWord; {$ifdef SYSTEMINLINE}inline;{$endif}
begin
  Result:=QWord(SwapEndian(Int64(AValue)));
end;
{$ifndef FPC_SYSTEM_HAS_MEM_BARRIER}
{$define FPC_SYSTEM_HAS_MEM_BARRIER}

{ Generic read/readwrite barrier code. }
procedure barrier; assembler; nostackframe;
asm
  // manually encode the instructions to avoid bootstrap and -march external
  // assembler settings
{$ifdef CPUARM_HAS_DMB}
  .long 0xf57ff05f   // dmb sy
{$else CPUARM_HAS_DMB}
{$ifdef CPUARMV6}
  mov r0, #0
  .long 0xee070fba   // mcr 15, 0, r0, cr7, cr10, {5}
{$else CPUARMV6}
{$ifdef SYSTEM_HAS_KUSER_MEMORY_BARRIER}
  stmfd r13!, {lr}
  mvn r0, #0x0000f000
  sub r0, r0, #0x5F
{$ifdef CPUARM_HAS_BLX}
  blx r0   // Call kuser_memory_barrier at address 0xffff0fa0
{$else CPUARM_HAS_BLX}
  mov lr, pc
{$ifdef CPUARM_HAS_BX}
  bx r0
{$else CPUARM_HAS_BX}
  mov pc, r0
{$endif CPUARM_HAS_BX}
{$endif CPUARM_HAS_BLX}
  ldmfd r13!, {pc}
{$endif SYSTEM_HAS_KUSER_MEMORY_BARRIER}
{$endif CPUARMV6}
{$endif CPUARM_HAS_DMB}
end;

procedure ReadBarrier;{$ifdef SYSTEMINLINE}inline;{$endif}
begin
  barrier;
end;

procedure ReadDependencyBarrier;{$ifdef SYSTEMINLINE}inline;{$endif}
begin
  { reads are implicitly ordered with respect to earlier reads they depend on;
    no explicit barrier is required on ARM }
end;

procedure ReadWriteBarrier;{$ifdef SYSTEMINLINE}inline;{$endif}
begin
  barrier;
end;

procedure WriteBarrier; assembler; nostackframe;
asm
  // specialize the write barrier because, according to ARM, implementations of
  // "dmb st" may be more optimal than the more generic "dmb sy"
{$ifdef CPUARM_HAS_DMB}
  .long 0xf57ff05e   // dmb st
{$else CPUARM_HAS_DMB}
{$ifdef CPUARMV6}
  mov r0, #0
  .long 0xee070fba   // mcr 15, 0, r0, cr7, cr10, {5}
{$else CPUARMV6}
{$ifdef SYSTEM_HAS_KUSER_MEMORY_BARRIER}
  stmfd r13!, {lr}
  mvn r0, #0x0000f000
  sub r0, r0, #0x5F
{$ifdef CPUARM_HAS_BLX}
  blx r0   // Call kuser_memory_barrier at address 0xffff0fa0
{$else CPUARM_HAS_BLX}
  mov lr, pc
{$ifdef CPUARM_HAS_BX}
  bx r0
{$else CPUARM_HAS_BX}
  mov pc, r0
{$endif CPUARM_HAS_BX}
{$endif CPUARM_HAS_BLX}
  ldmfd r13!, {pc}
{$endif SYSTEM_HAS_KUSER_MEMORY_BARRIER}
{$endif CPUARMV6}
{$endif CPUARM_HAS_DMB}
end;
{$endif}

{ include hand-optimized assembler division code }
{$i divide.inc}