Compare commits
2938 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 93c563e073 | |||
| b3b02df569 | |||
| 0887760de1 | |||
| 148c333943 | |||
| 98e5809d24 | |||
| 87b6ed7f4c | |||
| 259f092143 | |||
| 108c9c6fb5 | |||
| 128b40ce3c | |||
| 717aec388b | |||
| f2287d2cd7 | |||
| 4182fa2967 | |||
| d6cf38a929 | |||
|
|
8c97883317 | ||
|
|
455a29c491 | ||
|
|
a618ad45bf | ||
|
|
0ff8ae4596 | ||
|
|
a5b689439b | ||
|
|
f9947541a1 | ||
|
|
c2b2b38081 | ||
|
|
7884c7da04 | ||
|
|
b471e97a10 | ||
|
|
611fe0bc42 | ||
|
|
6d649e1dff | ||
|
|
d0bfe7738a | ||
|
|
95d33554db | ||
|
|
4298e3d0cd | ||
|
|
a7fd70fa21 | ||
|
|
60dc47e0a6 | ||
|
|
ff298f21b7 | ||
|
|
f04a04a7e3 | ||
|
|
39e7f0c2d4 | ||
|
|
726b260cd5 | ||
|
|
6a8ce4b412 | ||
|
|
32626ea9e3 | ||
|
|
d4a8afd6e8 | ||
|
|
8acfd92f92 | ||
|
|
7fb4188f51 | ||
|
|
8b525bb8bc | ||
|
|
a86a16600b | ||
|
|
d0341754d6 | ||
|
|
f968bc1b2a | ||
|
|
7af7659ac2 | ||
|
|
a6952fd651 | ||
|
|
4c7fb35f57 | ||
|
|
87efb27dc5 | ||
|
|
45b306480e | ||
|
|
2a68fc6c48 | ||
|
|
30d88e1683 | ||
|
|
a97a69c96e | ||
|
|
87d0c9a2ed | ||
|
|
147d16d49e | ||
|
|
70f411148e | ||
|
|
6e8be22c8b | ||
|
|
e0ff380810 | ||
|
|
fb7b38ab59 | ||
|
|
1f7a7bef60 | ||
|
|
15cbb6fc95 | ||
|
|
f34e1093fb | ||
|
|
f47e1d5cae | ||
|
|
bc8bd3a189 | ||
|
|
c6ae79cbc8 | ||
|
|
528f6cef1c | ||
|
|
d8b353ac98 | ||
|
|
e3837dcc63 | ||
|
|
f9127b2d16 | ||
|
|
e05039d623 | ||
|
|
cd7806e9c0 | ||
|
|
aea10892d3 | ||
|
|
d6bacd0c7b | ||
|
|
c36095197d | ||
|
|
27d183d42f | ||
|
|
60d27ce4be | ||
|
|
f0cd6d6902 | ||
|
|
b2c9e43f04 | ||
|
|
de59f2e077 | ||
|
|
2ae7e07466 | ||
|
|
89fa6a4e75 | ||
|
|
8a92d557f4 | ||
|
|
99d1d2ffe3 | ||
|
|
0fdfaad053 | ||
|
|
5db1e5fa39 | ||
|
|
b2d98b9979 | ||
|
|
8258163c31 | ||
|
|
c8b45093f0 | ||
|
|
3d9c0ecd14 | ||
|
|
ab4cd1e8a2 | ||
|
|
2fab324924 | ||
|
|
e04d21b997 | ||
|
|
1efc4c8f1f | ||
|
|
7af8f96021 | ||
|
|
3c5b88dabf | ||
|
|
13b748a57c | ||
|
|
3957dccb51 | ||
|
|
49919aac70 | ||
|
|
8ec078f3e1 | ||
|
|
d80a3ed4b7 | ||
|
|
6a01712cfd | ||
|
|
0d9b120b02 | ||
|
|
c867a87727 | ||
|
|
3ea77d7ab9 | ||
|
|
533db89c76 | ||
|
|
c7e6b8302e | ||
|
|
d88240d227 | ||
|
|
5a2f0dcb4a | ||
|
|
306f3468c3 | ||
|
|
7691d961c1 | ||
|
|
4f49ac4cb0 | ||
|
|
58bf32c27f | ||
|
|
87d079bd30 | ||
|
|
0061ecf325 | ||
|
|
1709dbe9d6 | ||
|
|
69dd01fc69 | ||
|
|
1a0f5a0510 | ||
|
|
0ad8dfe20d | ||
|
|
1415f5b0b1 | ||
|
|
722c65d7b6 | ||
|
|
b25dbfa667 | ||
|
|
5f6e6e708c | ||
|
|
1324e6cdd5 | ||
|
|
28227f1a8b | ||
|
|
b4aa685d52 | ||
|
|
5862c9e6a5 | ||
|
|
1f87784f05 | ||
|
|
e2bf021bf7 | ||
|
|
33054bcacd | ||
|
|
89dfbf2125 | ||
|
|
e47c9152ec | ||
|
|
e37f4691b0 | ||
|
|
320a54b023 | ||
|
|
65f290b699 | ||
|
|
1c0f0920f9 | ||
|
|
6ea03bb3af | ||
|
|
e1cb6ca15b | ||
|
|
0ef3d3b429 | ||
|
|
243d6c2625 | ||
|
|
4c65b4886f | ||
|
|
f6dbffd58c | ||
|
|
094d6f6d2e | ||
|
|
57588857b3 | ||
|
|
6160091afd | ||
|
|
1c8f606fd2 | ||
|
|
9ae39d8361 | ||
|
|
988e506b1c | ||
|
|
3920874a35 | ||
|
|
9eedbeeb72 | ||
|
|
cf3cf01a6c | ||
|
|
90bed24b99 | ||
|
|
9ed0bb240d | ||
|
|
b0cd3924e5 | ||
|
|
b41f53b333 | ||
|
|
4ae2333c93 | ||
|
|
d59b70d0ea | ||
|
|
4e24a94ff4 | ||
|
|
40718c0ad3 | ||
|
|
06b9b46fb3 | ||
|
|
1370596c5e | ||
|
|
4e3d8540ff | ||
|
|
ccd8592e46 | ||
|
|
afb1132748 | ||
|
|
d82a1d018e | ||
|
|
f6773a318f | ||
|
|
7fc57abb23 | ||
|
|
e7b411f5e9 | ||
|
|
c82cfc7f4a | ||
|
|
e6edf80aa4 | ||
|
|
5b682b36f1 | ||
|
|
4f6c70aee0 | ||
|
|
e4a672483f | ||
|
|
b2471decb7 | ||
|
|
eff3ae65d8 | ||
|
|
9dbb839146 | ||
|
|
6633d69e65 | ||
|
|
b4bbc7b1f3 | ||
|
|
6f886b2457 | ||
|
|
042c662d9d | ||
|
|
536b30e12c | ||
|
|
44d8a2e01f | ||
|
|
b471cdb56f | ||
|
|
b8ad55019c | ||
|
|
b67d7f0b6a | ||
|
|
5ac46e5134 | ||
|
|
bdae4d5e23 | ||
|
|
35c97512b5 | ||
|
|
d862672664 | ||
|
|
10143eb716 | ||
|
|
a6f0aade5c | ||
|
|
ac8fef0f22 | ||
|
|
839c51ed5b | ||
|
|
129f06488b | ||
|
|
fe9b4a6ac5 | ||
|
|
2ac8ad2063 | ||
|
|
da7c2740f4 | ||
|
|
bb5cbf420c | ||
|
|
12ac782b13 | ||
|
|
d3020580ff | ||
|
|
c83674433b | ||
|
|
0e58415bdb | ||
|
|
9a477ee926 | ||
|
|
2c51fa54d0 | ||
|
|
e23821f282 | ||
|
|
5afb671764 | ||
|
|
5884e7c3f1 | ||
|
|
d4ebb3544d | ||
|
|
ef51f8c648 | ||
|
|
66b1499ee0 | ||
|
|
48ffdcf169 | ||
|
|
1948a93584 | ||
|
|
e7fd7dd6fd | ||
|
|
e2efcb50f3 | ||
|
|
a3799fd5d0 | ||
|
|
28b402a778 | ||
|
|
fb30a56cc3 | ||
|
|
d8fd909d4f | ||
|
|
37d67549bd | ||
|
|
00077390ed | ||
|
|
8f7f7d2cb7 | ||
|
|
6732d9cb64 | ||
|
|
b8413900bb | ||
|
|
56dc2279b1 | ||
|
|
ed71f684be | ||
|
|
16ae04c7f9 | ||
|
|
434aa30d12 | ||
|
|
df643cecbd | ||
|
|
f0a0e2d75d | ||
|
|
4a3f2016d4 | ||
|
|
3dbdeee5f6 | ||
|
|
374e445071 | ||
|
|
44f06304d2 | ||
|
|
88461fb527 | ||
|
|
41199179de | ||
|
|
1d08598f6e | ||
|
|
68e686df9f | ||
|
|
8bf8911893 | ||
|
|
b7839f6242 | ||
|
|
c9de707455 | ||
|
|
293e4bccae | ||
|
|
f9cccad370 | ||
|
|
d1d5a42be0 | ||
|
|
c9c9b24beb | ||
|
|
b718ae3a52 | ||
|
|
b674a5f0ed | ||
|
|
3340d2add2 | ||
|
|
c7cdf66d8e | ||
|
|
9a50b60b0c | ||
|
|
e164804f43 | ||
|
|
bcdef9ea71 | ||
|
|
5d509b69c8 | ||
|
|
7448ee97f2 | ||
|
|
4c3e5c0d2b | ||
|
|
729cc48603 | ||
|
|
d37455925f | ||
|
|
d2720e2490 | ||
|
|
e11022c95a | ||
|
|
3fff68b9c0 | ||
|
|
15b121039f | ||
|
|
a6c85fd0e1 | ||
|
|
5e0bbf0081 | ||
|
|
ab2d14189e | ||
|
|
197c13b3b9 | ||
|
|
ef1d3270a7 | ||
|
|
327af85e3e | ||
|
|
d295356a39 | ||
|
|
a80706af2b | ||
|
|
9bfc3850c7 | ||
|
|
db1d817dee | ||
|
|
f3548037fe | ||
|
|
ab0e8bba00 | ||
|
|
90c6e0172a | ||
|
|
2c4c79d55e | ||
|
|
023ba72a9a | ||
|
|
3f9e64e0ec | ||
|
|
3bc99c1da1 | ||
|
|
bd8d78d204 | ||
|
|
22a0596fca | ||
|
|
dff48639e9 | ||
|
|
d6bb5e2264 | ||
|
|
56f83aeb38 | ||
|
|
415f2e938c | ||
|
|
73f7f583e4 | ||
|
|
04987422e7 | ||
|
|
f864338ce2 | ||
|
|
23ebdbe1f2 | ||
|
|
ef5dafb745 | ||
|
|
016781027c | ||
|
|
f187671a89 | ||
|
|
ea89cb65bb | ||
|
|
ad2238e880 | ||
|
|
cb416611ce | ||
|
|
9f083f99ac | ||
|
|
21da408832 | ||
|
|
ba10b91648 | ||
|
|
8217448ee5 | ||
|
|
25aeedb003 | ||
|
|
26a93bc733 | ||
|
|
b6d2d8dd4c | ||
|
|
8c1bd4ec32 | ||
|
|
2aedc1fd2b | ||
|
|
b805609986 | ||
|
|
c50ce30b00 | ||
|
|
3ec674a820 | ||
|
|
f00b5abc40 | ||
|
|
0885152713 | ||
|
|
a2dd501c11 | ||
|
|
8de5047035 | ||
|
|
b20d814a24 | ||
|
|
bf4ca847a5 | ||
|
|
ebc47d00a1 | ||
|
|
308ccae0c5 | ||
|
|
c5207da9e7 | ||
|
|
0afa3f5713 | ||
|
|
b394f05835 | ||
|
|
73dc899ff1 | ||
|
|
0b62c28436 | ||
|
|
742870285a | ||
|
|
6ae27009c1 | ||
|
|
33771c2163 | ||
|
|
603d3a879c | ||
|
|
cbd8afabb7 | ||
|
|
284d59904e | ||
|
|
50345012c9 | ||
|
|
bf0055f045 | ||
|
|
dd81f13238 | ||
|
|
c30f08b474 | ||
|
|
478d7b1b06 | ||
|
|
697082e84e | ||
|
|
0ed674296f | ||
|
|
4d96244b29 | ||
|
|
33d8daf933 | ||
|
|
f1d72a7e2c | ||
|
|
a12f3ec84b | ||
|
|
5a5cb82043 | ||
|
|
289c3804a3 | ||
|
|
e531864424 | ||
|
|
5a2c937025 | ||
|
|
9bc04d26fc | ||
|
|
31612a7e17 | ||
|
|
1a1bed04f2 | ||
|
|
c986ffad4f | ||
|
|
891c1cf1f4 | ||
|
|
c251194b15 | ||
|
|
50d84e0884 | ||
|
|
65507ea66e | ||
|
|
b6a7ae6dca | ||
|
|
7f281a4642 | ||
|
|
94a4793a71 | ||
|
|
28b49837fc | ||
|
|
149052b195 | ||
|
|
d6380ca8a1 | ||
|
|
a70dcb13d1 | ||
|
|
5ec16356d0 | ||
|
|
f5e7165537 | ||
|
|
838c38c91a | ||
|
|
db29cbe851 | ||
|
|
d7cd5986db | ||
|
|
3514e03327 | ||
|
|
ef9c98fba8 | ||
|
|
2110708c8e | ||
|
|
82f5716362 | ||
|
|
a6b7e717f5 | ||
|
|
66b94fc37c | ||
|
|
f2743a6dc5 | ||
|
|
28fda1a013 | ||
|
|
7c9d9f6ee6 | ||
|
|
2549fa12c9 | ||
|
|
bea7cc9a81 | ||
|
|
9a03cd3590 | ||
|
|
46528caa5a | ||
|
|
7628f2a6c9 | ||
|
|
d01718aa91 | ||
|
|
3eccce5e4f | ||
|
|
92650bdff0 | ||
|
|
e0eac74f83 | ||
|
|
35222694e5 | ||
|
|
92388e8bad | ||
|
|
c9424a9989 | ||
|
|
f5c90dbd43 | ||
|
|
bd65df8ad4 | ||
|
|
e39bb485ea | ||
|
|
ebe165dff8 | ||
|
|
12ff721ec1 | ||
|
|
8fdd9a7ef7 | ||
|
|
8891534a3c | ||
|
|
be2adf40d8 | ||
|
|
57512c1e28 | ||
|
|
f3b8ce8cbc | ||
|
|
14da0a7f54 | ||
|
|
1c039ab96a | ||
|
|
256e8325e3 | ||
|
|
c76f03a263 | ||
|
|
e0e579bd99 | ||
|
|
e513fc7884 | ||
|
|
c8e492d82f | ||
|
|
aeb97374bf | ||
|
|
2a2bfc49f1 | ||
|
|
958143a8c5 | ||
|
|
66ad192266 | ||
|
|
728c55fd6f | ||
|
|
43f432c67a | ||
|
|
1328efd027 | ||
|
|
ad97d70a43 | ||
|
|
44cf9866cb | ||
|
|
0a8fab0737 | ||
|
|
c2039da7b8 | ||
|
|
0842b82eca | ||
|
|
03d3f7179b | ||
|
|
5defbf25f1 | ||
|
|
a0cbd7e33a | ||
|
|
0fd170f47a | ||
|
|
0b52022c13 | ||
|
|
0f1358b69f | ||
|
|
8040154b2f | ||
|
|
8e47273186 | ||
|
|
4518bce71b | ||
|
|
8c1d7c6dc5 | ||
|
|
2983722926 | ||
|
|
7f096f23e3 | ||
|
|
858a97a485 | ||
|
|
1d40f0a930 | ||
|
|
7729070481 | ||
|
|
e6ba7eb337 | ||
|
|
189e892b96 | ||
|
|
f62ad11fb0 | ||
|
|
e754cb99c7 | ||
|
|
7e06973054 | ||
|
|
38375b49e3 | ||
|
|
b5293067fc | ||
|
|
d1d59a2a01 | ||
|
|
1a9d08a325 | ||
|
|
1948d8ea42 | ||
|
|
41750a8336 | ||
|
|
8336615eae | ||
|
|
af18f7cc97 | ||
|
|
824e47ece7 | ||
|
|
f73091f391 | ||
|
|
ac4d0bf7ed | ||
|
|
391b59930b | ||
|
|
15535cfb64 | ||
|
|
82d425da04 | ||
|
|
d90011a535 | ||
|
|
b4af33327f | ||
|
|
8cc9d08f46 | ||
|
|
74c1feed08 | ||
|
|
5535bfc83f | ||
|
|
3e8f33da68 | ||
|
|
a3737e2b81 | ||
|
|
81251115af | ||
|
|
c22ee2b0a0 | ||
|
|
0292730cea | ||
|
|
29c73f242c | ||
|
|
13084ece5f | ||
|
|
704c83bcd6 | ||
|
|
bf4376be1b | ||
|
|
2534cd1439 | ||
|
|
39abb6f621 | ||
|
|
93a7c14e95 | ||
|
|
25988947e4 | ||
|
|
108b7dced5 | ||
|
|
fc1e12fb2f | ||
|
|
53f5ec09de | ||
|
|
aa94969472 | ||
|
|
49a0a08805 | ||
|
|
e780372884 | ||
|
|
dd567654e7 | ||
|
|
4d371c0f21 | ||
|
|
03211d5543 | ||
|
|
dac1ba44e4 | ||
|
|
d33100a11c | ||
|
|
17e41970d3 | ||
|
|
89ded54929 | ||
|
|
1d9d989d8d | ||
|
|
9526094272 | ||
|
|
81cb374084 | ||
|
|
a5b2695771 | ||
|
|
b16aee93f9 | ||
|
|
cd6f8249bf | ||
|
|
af9d6a2d05 | ||
|
|
4c96fd3279 | ||
|
|
0a166f245c | ||
|
|
b79bd65c4e | ||
|
|
5050874007 | ||
|
|
5c523332af | ||
|
|
de0d69ab26 | ||
|
|
aefcea95cc | ||
|
|
ef826603a7 | ||
|
|
1aa8309a9e | ||
|
|
fc8a32425c | ||
|
|
49b9297166 | ||
|
|
30b45731ae | ||
|
|
50f716f3d8 | ||
|
|
a66fab4cea | ||
|
|
603355f6d6 | ||
|
|
637a4cc812 | ||
|
|
663306b8f6 | ||
|
|
0ffea6832d | ||
|
|
8cb5f311c6 | ||
|
|
292962e8b0 | ||
|
|
55cfc6f78e | ||
|
|
eb907dc4ba | ||
|
|
bf50d8f951 | ||
|
|
7125026f42 | ||
|
|
97eff6a185 | ||
|
|
fec8390f7e | ||
|
|
30def668f0 | ||
|
|
9e1ec40bad | ||
|
|
3d35d4485d | ||
|
|
6d43d3c180 | ||
|
|
e6c827bf11 | ||
|
|
75ec582a1d | ||
|
|
c5916d58b8 | ||
|
|
dba22837f6 | ||
|
|
b43678b2e3 | ||
|
|
972c6906d5 | ||
|
|
522d80bd2c | ||
|
|
38fb4cc181 | ||
|
|
dfcdd84dc6 | ||
|
|
97e9977aef | ||
|
|
a54c0db457 | ||
|
|
e8ae16a7f3 | ||
|
|
64dfa6182b | ||
|
|
bead780f22 | ||
|
|
4b5141c0fb | ||
|
|
6621efe139 | ||
|
|
ae0c6dc62a | ||
|
|
f92d351cf0 | ||
|
|
f0aa481a2a | ||
|
|
a216b2bb9c | ||
|
|
29859e81ba | ||
|
|
0b1323e070 | ||
|
|
3a18a28001 | ||
|
|
c5e11468f3 | ||
|
|
ab5bf7e48d | ||
|
|
19d18b6e4e | ||
|
|
4c629d0a7c | ||
|
|
3718abc3d2 | ||
|
|
a98bfdf011 | ||
|
|
0644b4a7fd | ||
|
|
a3bf0b2406 | ||
|
|
5302cfe062 | ||
|
|
8090285d42 | ||
|
|
1abce94803 | ||
|
|
e3a78ad150 | ||
|
|
261dd70b6f | ||
|
|
6fa75fb4b1 | ||
|
|
432570c98f | ||
|
|
78bd4debc6 | ||
|
|
f14897101b | ||
|
|
7bdb2b967d | ||
|
|
f481a51f39 | ||
|
|
b35b931e3b | ||
|
|
19105494e1 | ||
|
|
9baade2cb5 | ||
|
|
fc717ebada | ||
|
|
ddc9d4d885 | ||
|
|
795f592013 | ||
|
|
3d086ca6b9 | ||
|
|
66f306f325 | ||
|
|
bf3b15b744 | ||
|
|
24fb2b483b | ||
|
|
6c5ed87c59 | ||
|
|
f17deafc0a | ||
|
|
803b0a2811 | ||
|
|
8c4d339f25 | ||
|
|
7b0eb0e4ad | ||
|
|
86ba817445 | ||
|
|
3e511b588b | ||
|
|
c16f9880c5 | ||
|
|
719279f71d | ||
|
|
5489894c3e | ||
|
|
127a5c50b8 | ||
|
|
ca866c6d54 | ||
|
|
4dff88d4c5 | ||
|
|
372b4583c2 | ||
|
|
ef9315200c | ||
|
|
7477a95a59 | ||
|
|
bbec080bb9 | ||
|
|
452c7ebdea | ||
|
|
bc1dc73cfc | ||
|
|
e60572f62c | ||
|
|
33d47c2b53 | ||
|
|
0bb27d839a | ||
|
|
1916153509 | ||
|
|
08b4e03f62 | ||
|
|
3dbc5ba6c2 | ||
|
|
8cf7445b93 | ||
|
|
53d918ae9e | ||
|
|
f84f359d8b | ||
|
|
280675eb80 | ||
|
|
8eeeebf091 | ||
|
|
ab6c7e42d6 | ||
|
|
ecd9a3a79c | ||
|
|
b27ff432ee | ||
|
|
bf87da7496 | ||
|
|
c377d8465e | ||
|
|
c0edb37000 | ||
|
|
b7f4324b8f | ||
|
|
85578f0462 | ||
|
|
01fb69798f | ||
|
|
382aacd710 | ||
|
|
ec2fc0cf0b | ||
|
|
ace3f20a22 | ||
|
|
5b28d0e703 | ||
|
|
7816fae331 | ||
|
|
64bc48863d | ||
|
|
1412f663f6 | ||
|
|
d983203b52 | ||
|
|
c461049896 | ||
|
|
54dcfdca99 | ||
|
|
d5cd049d8f | ||
|
|
c1777bff3b | ||
|
|
008fc6c51e | ||
|
|
933f78de7b | ||
|
|
eeba937282 | ||
|
|
24acd3f492 | ||
|
|
5d7c147a6a | ||
|
|
f49875d5cf | ||
|
|
7e88b42107 | ||
|
|
4698e665e4 | ||
|
|
115c8020b1 | ||
|
|
bd28502d2c | ||
|
|
d4a77e1b44 | ||
|
|
f291d90271 | ||
|
|
3f13af8e62 | ||
|
|
27ff7ac8d7 | ||
|
|
d359503ad8 | ||
|
|
ed53df90a4 | ||
|
|
05a64f1302 | ||
|
|
a6206398fc | ||
|
|
f065b060bd | ||
|
|
122142e439 | ||
|
|
d0673231d4 | ||
|
|
732d6a9e2c | ||
|
|
00f03292e6 | ||
|
|
625a249b65 | ||
|
|
7601532de8 | ||
|
|
11e9f4e3e0 | ||
|
|
d01e4fbe7a | ||
|
|
09d770f833 | ||
|
|
2f2af816e7 | ||
|
|
e70379e04f | ||
|
|
ae8b724d92 | ||
|
|
abeda29087 | ||
|
|
9c0b4cb8b3 | ||
|
|
c2016af162 | ||
|
|
cfd3418653 | ||
|
|
a095fd622f | ||
|
|
73141e58ed | ||
|
|
c560a1b10e | ||
|
|
82187f8372 | ||
|
|
aeaef1bedf | ||
|
|
7d01e4254b | ||
|
|
b31dbcf756 | ||
|
|
314289c52f | ||
|
|
da83196996 | ||
|
|
fb535fdfd0 | ||
|
|
3b1eaa65e4 | ||
|
|
08e246c628 | ||
|
|
4b48bfa87a | ||
|
|
948e75dcb5 | ||
|
|
192aeb0ae3 | ||
|
|
2d403cf258 | ||
|
|
5dfb96a310 | ||
|
|
134accaf46 | ||
|
|
3f607ade14 | ||
|
|
300ff7be75 | ||
|
|
9a56f8fb9a | ||
|
|
8672aff298 | ||
|
|
83b65ef534 | ||
|
|
515c5f76e0 | ||
|
|
b1fdbd63ec | ||
|
|
d90f677cb4 | ||
|
|
797238464a | ||
|
|
cd53e6abed | ||
|
|
1476d45536 | ||
|
|
476f186e8e | ||
|
|
8fa45f20dc | ||
|
|
24e8c33506 | ||
|
|
b17fcdeba2 | ||
|
|
ea2764b345 | ||
|
|
4d4a512f72 | ||
|
|
91538b5366 | ||
|
|
e4c79418c8 | ||
|
|
dc2e174d4f | ||
|
|
521aca9f09 | ||
|
|
0ac7ed3c97 | ||
|
|
91ee4ea7b6 | ||
|
|
98acce1185 | ||
|
|
4da0381aae | ||
|
|
dc0db2d1cb | ||
|
|
44ee5737a7 | ||
|
|
9022806fc0 | ||
|
|
91866396ef | ||
|
|
ff9380b248 | ||
|
|
b00575e95c | ||
|
|
ac524eff0d | ||
|
|
df9f839ed4 | ||
|
|
8d51e0620e | ||
|
|
70adb5d283 | ||
|
|
7e63862dff | ||
|
|
190d7957d4 | ||
|
|
307c825c4e | ||
|
|
7e39ed115d | ||
|
|
52eddcdcfd | ||
|
|
0d9eceb668 | ||
|
|
7084432a0e | ||
|
|
b02f30c822 | ||
|
|
701bd9b029 | ||
|
|
fb2c22c32d | ||
|
|
ddc25dffcf | ||
|
|
1211aa95fd | ||
|
|
d1b4ad6aca | ||
|
|
d18b85f8ca | ||
|
|
5d2ccfbe75 | ||
|
|
f1e68b6bef | ||
|
|
51a3d71fea | ||
|
|
6f518dfcc9 | ||
|
|
0a987ad06f | ||
|
|
7333e6ab7a | ||
|
|
296b057a0a | ||
|
|
5f3128bbb2 | ||
|
|
f0a5db068f | ||
|
|
265d5b7924 | ||
|
|
0bb5404e6b | ||
|
|
82cf349bf9 | ||
|
|
707056f825 | ||
|
|
22dcc9a651 | ||
|
|
56c78e55d5 | ||
|
|
2e3af7f474 | ||
|
|
3290ccd020 | ||
|
|
1934a09a3d | ||
|
|
7233e25907 | ||
|
|
615d36b236 | ||
|
|
e834703bb7 | ||
|
|
2b956e9cad | ||
|
|
d3158d1efe | ||
|
|
b21043c309 | ||
|
|
37a4362417 | ||
|
|
c565ec08ac | ||
|
|
8f68769af8 | ||
|
|
79fa1c3d4d | ||
|
|
6a2cb442ee | ||
|
|
6606d20a47 | ||
|
|
5cb50eca30 | ||
|
|
67243f3550 | ||
|
|
308746c7fb | ||
|
|
28e9032b10 | ||
|
|
44bcce3cc8 | ||
|
|
4351fc600b | ||
|
|
a1bcbed837 | ||
|
|
4152a0f9ed | ||
|
|
d8f4635366 | ||
|
|
e7717e58b5 | ||
|
|
83f3ee7cfa | ||
|
|
1b8afb73ad | ||
|
|
67cc62d619 | ||
|
|
12376e6a0c | ||
|
|
9316dd65c0 | ||
|
|
3b1445c660 | ||
|
|
39b1e4a204 | ||
|
|
df7d65b076 | ||
|
|
247dff8ab3 | ||
|
|
fcc1090595 | ||
|
|
9e184e65cf | ||
|
|
15e1f86711 | ||
|
|
6bc4aee029 | ||
|
|
b45429fc44 | ||
|
|
6cfe21094b | ||
|
|
90d9b91007 | ||
|
|
89f6fdd29c | ||
|
|
b07bf9f5c7 | ||
|
|
9a8782d122 | ||
|
|
e2249365f3 | ||
|
|
1f2079f2a8 | ||
|
|
a239f37302 | ||
|
|
215ca762d0 | ||
|
|
a4bcb1e1e2 | ||
|
|
0c0f69e3bf | ||
|
|
0fa17b1385 | ||
|
|
92e86ac5b7 | ||
|
|
8b34fb853e | ||
|
|
09b8f65246 | ||
|
|
9a3e4d8788 | ||
|
|
577f5e368a | ||
|
|
da281a99ec | ||
|
|
1671430b50 | ||
|
|
6c766be68c | ||
|
|
883963d90d | ||
|
|
b84fdb1f3c | ||
|
|
8645ee5546 | ||
|
|
ffa4b9e65c | ||
|
|
c6dbb2c066 | ||
|
|
0af6f6460b | ||
|
|
156f4e0fd8 | ||
|
|
e97601d3a8 | ||
|
|
100285a325 | ||
|
|
9796674218 | ||
|
|
8283cfc3ff | ||
|
|
d62fe78e6e | ||
|
|
5715350d96 | ||
|
|
17ee085396 | ||
|
|
c5cf05e085 | ||
|
|
f59c6223b4 | ||
|
|
30270584aa | ||
|
|
92377426bd | ||
|
|
3d71932ca6 | ||
|
|
ccf4d00385 | ||
|
|
f7390aaec9 | ||
|
|
d79529cada | ||
|
|
8f9a935132 | ||
|
|
3f24c8dedc | ||
|
|
741df13eb8 | ||
|
|
8e31d7e99c | ||
|
|
7ac8a9ea04 | ||
|
|
dcf70d887b | ||
|
|
558d7ee1d3 | ||
|
|
2e49ecd56f | ||
|
|
47b1f2182f | ||
|
|
efee86fa2e | ||
|
|
4e7ae5269b | ||
|
|
9238c72e08 | ||
|
|
83a863ea83 | ||
|
|
75759254c3 | ||
|
|
44ed10b6f2 | ||
|
|
82ed2ce416 | ||
|
|
29c0f75306 | ||
|
|
481224bbcd | ||
|
|
743f89f93f | ||
|
|
3aa2cce504 | ||
|
|
a83fec3dd0 | ||
|
|
b8a9139f8e | ||
|
|
eb61d5df72 | ||
|
|
57fb2a75ec | ||
|
|
0a6eb61ad0 | ||
|
|
df38b862c2 | ||
|
|
ddc6e33bc0 | ||
|
|
700fe244e7 | ||
|
|
c934a68bc4 | ||
|
|
8295df5a1e | ||
|
|
3b16cd8c56 | ||
|
|
4560df284b | ||
|
|
832aff5d76 | ||
|
|
46bd353027 | ||
|
|
1a2979aa7f | ||
|
|
8b8e313dc6 | ||
|
|
0881463d69 | ||
|
|
2e92989101 | ||
|
|
1d69b954bd | ||
|
|
90843b3bff | ||
|
|
78a7ef9fc5 | ||
|
|
efa0ea01f3 | ||
|
|
ddf5df6193 | ||
|
|
4fff0ab571 | ||
|
|
85b703981d | ||
|
|
45114d3283 | ||
|
|
723baca4c2 | ||
|
|
0a5b16dbe6 | ||
|
|
3f1362f744 | ||
|
|
8ff187a7b7 | ||
|
|
6a99135fa2 | ||
|
|
c12d6ec82c | ||
|
|
bf94e51418 | ||
|
|
c522aa87a8 | ||
|
|
7c9235559d | ||
|
|
be681118f5 | ||
|
|
e3bb2b423b | ||
|
|
47e00ed50c | ||
|
|
fd6c0d2704 | ||
|
|
b1245ef28d | ||
|
|
e0f0520c1f | ||
|
|
1c14cfc342 | ||
|
|
fe797352a0 | ||
|
|
57f29e5035 | ||
|
|
12d0cb2037 | ||
|
|
e33cf51b28 | ||
|
|
93c7ddbac5 | ||
|
|
5ba7d2696d | ||
|
|
2351f0898a | ||
|
|
81091d4be8 | ||
|
|
09ac9c5d00 | ||
|
|
f8b6ca2649 | ||
|
|
f5b373b818 | ||
|
|
fbfcc28753 | ||
|
|
9e84c0ef0c | ||
|
|
88f39db2eb | ||
|
|
1aaf623138 | ||
|
|
77dc94ab22 | ||
|
|
bd8d02527b | ||
|
|
7adacf5a7b | ||
|
|
6b5b547e2f | ||
|
|
9c9c77d2db | ||
|
|
b56dd9df21 | ||
|
|
22b7f548ad | ||
|
|
c2a75231d6 | ||
|
|
dd4f9c86c0 | ||
|
|
68577ae8a3 | ||
|
|
4a963315e5 | ||
|
|
a9954dd8e7 | ||
|
|
1588e8f7ca | ||
|
|
e3c476112c | ||
|
|
8745888ce9 | ||
|
|
7c9c2b25b0 | ||
|
|
6c622eeeee | ||
|
|
9b053c5518 | ||
|
|
8a72e23b29 | ||
|
|
538d9c4dc8 | ||
|
|
f87ecf0573 | ||
|
|
fbccf0f8b0 | ||
|
|
96d3363cb3 | ||
|
|
bfad04d648 | ||
|
|
8c3808c540 | ||
|
|
d0f4ece58a | ||
|
|
258ad709d4 | ||
|
|
ab91a90734 | ||
|
|
3ebc80ade6 | ||
|
|
898a56c53b | ||
|
|
b2928df4f8 | ||
|
|
f98038a03c | ||
|
|
dd07e2a457 | ||
|
|
3a2db12e77 | ||
|
|
c1f5a8d8ae | ||
|
|
6f3f4ff4a1 | ||
|
|
0571ca54f6 | ||
|
|
d44c4c05aa | ||
|
|
00aa11f620 | ||
|
|
02e08892b3 | ||
|
|
b292cffa4a | ||
|
|
ef949e14bb | ||
|
|
3dafbc4516 | ||
|
|
ee256cd3c6 | ||
|
|
10f00c6e4c | ||
|
|
02e584d932 | ||
|
|
d64e1bcd82 | ||
|
|
60fa76ccc1 | ||
|
|
b51d9bde91 | ||
|
|
290b8b9cc0 | ||
|
|
d0c9b7c9b5 | ||
|
|
10c6f840c4 | ||
|
|
ce7a00d174 | ||
|
|
9189bbb7d3 | ||
|
|
368d2f18f9 | ||
|
|
7171701599 | ||
|
|
c0fc9b7aca | ||
|
|
f59a1db8b7 | ||
|
|
725be222ac | ||
|
|
e044db016d | ||
|
|
b6592c796d | ||
|
|
21fb015069 | ||
|
|
a051c9bbd8 | ||
|
|
48e4373d28 | ||
|
|
fb82d37f09 | ||
|
|
ad3b66a178 | ||
|
|
ece37ea4f5 | ||
|
|
aff9427f64 | ||
|
|
00d435f726 | ||
|
|
74160e3737 | ||
|
|
09ec2db6e9 | ||
|
|
7c3a2435b3 | ||
|
|
083e1db489 | ||
|
|
9dd1d67a39 | ||
|
|
edeeda6fa5 | ||
|
|
fd253d5e91 | ||
|
|
cb41a28391 | ||
|
|
21cb609423 | ||
|
|
111b10cd5b | ||
|
|
1461b09b5f | ||
|
|
c2d2efd37d | ||
|
|
ad67e3bb5d | ||
|
|
23b861b48c | ||
|
|
1cd5d72be4 | ||
|
|
ff2f3c896f | ||
|
|
7cde453e36 | ||
|
|
6da9d43f69 | ||
|
|
d56d690d77 | ||
|
|
d618670fd3 | ||
|
|
d049746585 | ||
|
|
108fa7b97c | ||
|
|
8b107ad67a | ||
|
|
d607b9cb31 | ||
|
|
20fb66fa31 | ||
|
|
b3c5a9c4d6 | ||
|
|
44c74728bc | ||
|
|
1290e8c4cf | ||
|
|
ed12687837 | ||
|
|
7ecc9e0769 | ||
|
|
d890ccc92c | ||
|
|
97057ed7a1 | ||
|
|
5512fb6275 | ||
|
|
28c5326711 | ||
|
|
38a35dfb9a | ||
|
|
562cbef14e | ||
|
|
48a616d8a5 | ||
|
|
d8e2fdf913 | ||
|
|
8894156df5 | ||
|
|
c117c49dc9 | ||
|
|
6fdf1a8f99 | ||
|
|
2dbb4d9890 | ||
|
|
1459207d87 | ||
|
|
fe150c539f | ||
|
|
4a2fe338ef | ||
|
|
6be1ce40bb | ||
|
|
4b3b293ba7 | ||
|
|
502acf97e7 | ||
|
|
3459c75fbc | ||
|
|
1fc75ed494 | ||
|
|
2ed65c8b16 | ||
|
|
c6ef1cab79 | ||
|
|
0a2aede9ab | ||
|
|
6f3e6f368a | ||
|
|
182f2e83f4 | ||
|
|
69f3898a61 | ||
|
|
79bee49078 | ||
|
|
af9f871b93 | ||
|
|
699674a054 | ||
|
|
4dacd7e7a2 | ||
|
|
c2d65f7ad2 | ||
|
|
6348ca8da9 | ||
|
|
1a8002cf65 | ||
|
|
8d510fd82c | ||
|
|
76ea59b40b | ||
|
|
5e14469ea6 | ||
|
|
425540922c | ||
|
|
11fcaa20ae | ||
|
|
daae225887 | ||
|
|
4ed72335bd | ||
|
|
5da05b365f | ||
|
|
3f183cfd06 | ||
|
|
d3144da5eb | ||
|
|
e6b6766c33 | ||
|
|
2b064b272a | ||
|
|
5ec6c81d38 | ||
|
|
e6131bd6a9 | ||
|
|
5f55a9b9e2 | ||
|
|
68f62b1fc3 | ||
|
|
0173d60790 | ||
|
|
39cb13a2e4 | ||
|
|
eb8e94627d | ||
|
|
0eb9c2b576 | ||
|
|
77de0ac342 | ||
|
|
a2774f2cf5 | ||
|
|
118542badd | ||
|
|
d63a94300c | ||
|
|
dcc37451e5 | ||
|
|
fce4ba64a1 | ||
|
|
096546f888 | ||
|
|
94467fdb70 | ||
|
|
141ea81ba5 | ||
|
|
d9c09e1b81 | ||
|
|
a8b03e768c | ||
|
|
0cd53444a4 | ||
|
|
61970e1500 | ||
|
|
1705b5a65e | ||
|
|
c7281ef532 | ||
|
|
7ebea86a44 | ||
|
|
9bc9a5aa00 | ||
|
|
b1bf08c0d9 | ||
|
|
372e7d42f9 | ||
|
|
114f58bb0b | ||
|
|
f7d0158bac | ||
|
|
cc8bae2f2c | ||
|
|
fb581818c5 | ||
|
|
5a16b3eb10 | ||
|
|
61ac03fc08 | ||
|
|
84073b9bf1 | ||
|
|
ecdc695b22 | ||
|
|
809a8f065c | ||
|
|
a8aabd78d2 | ||
|
|
6f44d5b55f | ||
|
|
2c0a6d7f69 | ||
|
|
4c837d94a9 | ||
|
|
599624d962 | ||
|
|
4641a15287 | ||
|
|
792f04881c | ||
|
|
96fe6e4fb6 | ||
|
|
f337401484 | ||
|
|
02d55f24f6 | ||
|
|
11f17ce0cd | ||
|
|
43db682c6d | ||
|
|
2e5b0b7c07 | ||
|
|
ad9a0ac3df | ||
|
|
31b95b665b | ||
|
|
27132e42e9 | ||
|
|
8f8a9d89ef | ||
|
|
f0ce2acc4f | ||
|
|
1c0729df59 | ||
|
|
3f70585463 | ||
|
|
ead5cc741d | ||
|
|
e1524891fc | ||
|
|
e6a23a5932 | ||
|
|
8999a69546 | ||
|
|
dc00b4dd64 | ||
|
|
fa8776cfa2 | ||
|
|
47f7900cd3 | ||
|
|
7d4f58f268 | ||
|
|
6738af0a0c | ||
|
|
335d36211c | ||
|
|
d49c63ec3d | ||
|
|
644118cd17 | ||
|
|
f086b7ff9b | ||
|
|
2a77804739 | ||
|
|
9ab8f4e10e | ||
|
|
2e2fd394bf | ||
|
|
eb7401e526 | ||
|
|
4d05ec0e1e | ||
|
|
1043ffae0a | ||
|
|
c017e46820 | ||
|
|
38ce3f368c | ||
|
|
c4e35050b0 | ||
|
|
57c3c803d9 | ||
|
|
c2e05e2231 | ||
|
|
c9642aae86 | ||
|
|
9ef9f0bf32 | ||
|
|
798b6b202c | ||
|
|
5d58a814aa | ||
|
|
fbb34b3f3a | ||
|
|
91621d7b17 | ||
|
|
14718d8bbf | ||
|
|
06c06456c4 | ||
|
|
e95dfc7247 | ||
|
|
77e4564020 | ||
|
|
af836cda27 | ||
|
|
f0a7baf340 | ||
|
|
ed0f44f841 | ||
|
|
1a1dbdb476 | ||
|
|
259bb81b1e | ||
|
|
f09c74db21 | ||
|
|
42c4d3246c | ||
|
|
739e4e73ef | ||
|
|
1f2541fbec | ||
|
|
95140d9d9f | ||
|
|
b60d77c154 | ||
|
|
e8680760bf | ||
|
|
17d8047a93 | ||
|
|
a745aaf91c | ||
|
|
8acfd8ea02 | ||
|
|
42e00ebb24 | ||
|
|
1ed4142fdd | ||
|
|
558d8182db | ||
|
|
5794d18737 | ||
|
|
6a52717184 | ||
|
|
3fc95779ac | ||
|
|
a85972ffbc | ||
|
|
ee8e2b76e2 | ||
|
|
71758e9186 | ||
|
|
ac05de6835 | ||
|
|
e2d68e6119 | ||
|
|
7dd7020c5f | ||
|
|
788098b55f | ||
|
|
f280b32fa4 | ||
|
|
690a8acb30 | ||
|
|
7d3c248a05 | ||
|
|
94c0104ffa | ||
|
|
e20a8f7534 | ||
|
|
226bf34803 | ||
|
|
607e010874 | ||
|
|
a169ff636b | ||
|
|
463ddf1707 | ||
|
|
0c0655c0f2 | ||
|
|
93849370b1 | ||
|
|
fc7cefcf19 | ||
|
|
d4c6209aa0 | ||
|
|
d6b04c58e8 | ||
|
|
04118d4965 | ||
|
|
5ca952bd9a | ||
|
|
24e1a98275 | ||
|
|
a8c5da0ae0 | ||
|
|
4196c723eb | ||
|
|
11612a24ee | ||
|
|
c54a91eab3 | ||
|
|
dea856b7e3 | ||
|
|
01cbe3d289 | ||
|
|
ce6ca49d21 | ||
|
|
c3b041f7ae | ||
|
|
aaf2684988 | ||
|
|
7fcf408189 | ||
|
|
c325c0e085 | ||
|
|
67be0a85c0 | ||
|
|
3ec910ff85 | ||
|
|
f387d12872 | ||
|
|
1fec6a5556 | ||
|
|
946d2c17c8 | ||
|
|
c513493757 | ||
|
|
869379020d | ||
|
|
f3bb3dcfc2 | ||
|
|
681898ed1e | ||
|
|
a10be8087f | ||
|
|
4d61c04e5c | ||
|
|
07fe1c5659 | ||
|
|
12b4345672 | ||
|
|
6b50bd43f7 | ||
|
|
735e6a8ab3 | ||
|
|
f5508db24f | ||
|
|
04fda2fcbe | ||
|
|
e7b3a1c822 | ||
|
|
ccbcd0a80d | ||
|
|
54b991cfcb | ||
|
|
b719019b26 | ||
|
|
cc81cd3215 | ||
|
|
e8039cd822 | ||
|
|
a3c77e6dc6 | ||
|
|
ea0a514e03 | ||
|
|
193bba77b0 | ||
|
|
f0779f95a3 | ||
|
|
65d947e449 | ||
|
|
cef5b2eb04 | ||
|
|
1c1614d207 | ||
|
|
8490efe0ad | ||
|
|
438cee4e21 | ||
|
|
70a9b286e5 | ||
|
|
81aa19a8f0 | ||
|
|
668645fcda | ||
|
|
14e76108cb | ||
|
|
9ecb4f4ac8 | ||
|
|
9e1ab7c6b6 | ||
|
|
8ffa84f875 | ||
|
|
c8e92feb14 | ||
|
|
c23dd8a951 | ||
|
|
7b2ceba128 | ||
|
|
2570385770 | ||
|
|
aeb2f01a15 | ||
|
|
c59cff396d | ||
|
|
ecc9c88ff8 | ||
|
|
eb01ffd4e6 | ||
|
|
f225b558ec | ||
|
|
688d9c9a82 | ||
|
|
09e8381ec7 | ||
|
|
732a315a4b | ||
|
|
686c1d676d | ||
|
|
048da693c5 | ||
|
|
9a3b949687 | ||
|
|
40186d3813 | ||
|
|
66c986ba13 | ||
|
|
98cfc17843 | ||
|
|
d3a6693eef | ||
|
|
fe98fe8cdc | ||
|
|
eb1a495a7a | ||
|
|
720975dff4 | ||
|
|
4ee0e6996a | ||
|
|
47bdca1041 | ||
|
|
c2ed214a74 | ||
|
|
1a56fbc101 | ||
|
|
98c82242c5 | ||
|
|
e6a6df1052 | ||
|
|
6d034596d3 | ||
|
|
92e69c8197 | ||
|
|
b0753dc93d | ||
|
|
4515dd5c89 | ||
|
|
3a72e05c3e | ||
|
|
796942e6fa | ||
|
|
522257343b | ||
|
|
3594a80e04 | ||
|
|
9cc204e83a | ||
|
|
17ab194031 | ||
|
|
6bf2ad27d1 | ||
|
|
0c47a902f5 | ||
|
|
b340663cdf | ||
|
|
98a275a382 | ||
|
|
11bc35eb6c | ||
|
|
7ef2f7352c | ||
|
|
eb82195ad7 | ||
|
|
93cf02842c | ||
|
|
90ba3fddbc | ||
|
|
bead800c13 | ||
|
|
45d960f4f5 | ||
|
|
ac0963a0a5 | ||
|
|
5cf880e8fc | ||
|
|
86a6cfc1d0 | ||
|
|
33e19d3bec | ||
|
|
eb3277587a | ||
|
|
adef91d82d | ||
|
|
50c505b845 | ||
|
|
cedf4bccf4 | ||
|
|
5e37370618 | ||
|
|
f90688b089 | ||
|
|
0a83f9ab6e | ||
|
|
b79915c0c2 | ||
|
|
a918bda679 | ||
|
|
1ba54e3b65 | ||
|
|
92bb233668 | ||
|
|
420a8c986d | ||
|
|
900e71e5cc | ||
|
|
97971bef0c | ||
|
|
e93c2b88ba | ||
|
|
eb57852f2e | ||
|
|
6457241e66 | ||
|
|
bf245d3b98 | ||
|
|
4e26a1b700 | ||
|
|
2f44b81d4f | ||
|
|
d65e1b30ce | ||
|
|
4a17760a2d | ||
|
|
23c6325cb6 | ||
|
|
0e4af8c057 | ||
|
|
3bddfed542 | ||
|
|
e6d7a493cc | ||
|
|
73c6989951 | ||
|
|
1be05cb03a | ||
|
|
ac4d847eac | ||
|
|
3f641f487d | ||
|
|
1c8517947b | ||
|
|
4cb578bf60 | ||
|
|
6099492579 | ||
|
|
8ff32bfbba | ||
|
|
d4b46b1295 | ||
|
|
f6379dea82 | ||
|
|
784eb2d15b | ||
|
|
36ee8911b4 | ||
|
|
97253354ac | ||
|
|
2f5eb9f6d3 | ||
|
|
07c845be03 | ||
|
|
cb436d5b4a | ||
|
|
2634ae65fd | ||
|
|
922018ac2d | ||
|
|
e783afe87f | ||
|
|
fbcadf3d4d | ||
|
|
1f7b994232 | ||
|
|
c4ea5a3bfd | ||
|
|
651cc04ee1 | ||
|
|
354e535b37 | ||
|
|
df51b41a45 | ||
|
|
29bb129c9b | ||
|
|
1e5476e573 | ||
|
|
480dfc3879 | ||
|
|
90a70945d6 | ||
|
|
659573338c | ||
|
|
5a6b650d8b | ||
|
|
585afa09e5 | ||
|
|
f343e4cb0e | ||
|
|
1b993e167f | ||
|
|
d4dd945828 | ||
|
|
88ffa96263 | ||
|
|
2ae666dc7f | ||
|
|
8677890fc1 | ||
|
|
d9e8376209 | ||
|
|
00677f73ec | ||
|
|
8289108d16 | ||
|
|
711b4508c9 | ||
|
|
5885d47717 | ||
|
|
adcf635c1f | ||
|
|
239ec10edf | ||
|
|
3ae4a7e660 | ||
|
|
673d814a45 | ||
|
|
b7b5c9ad1d | ||
|
|
1c2dbd6a27 | ||
|
|
b1dbd3fcdf | ||
|
|
a78d75f185 | ||
|
|
52691fbb52 | ||
|
|
4ecf30530a | ||
|
|
a3b00fdcd6 | ||
|
|
a7d4a3f922 | ||
|
|
35395d7ed7 | ||
|
|
09ea9c9fd6 | ||
|
|
3e86dfe480 | ||
|
|
fcbdd93043 | ||
|
|
805196a6a0 | ||
|
|
bd34729217 | ||
|
|
6917c161c8 | ||
|
|
156aa4c139 | ||
|
|
ddb9b2fc47 | ||
|
|
00aeef9f1b | ||
|
|
c76c916475 | ||
|
|
9090d8b128 | ||
|
|
5a8351d7ea | ||
|
|
be6ac0408a | ||
|
|
1a07aed6aa | ||
|
|
1cf1dab649 | ||
|
|
f0d3501dbd | ||
|
|
da7a2c0c7f | ||
|
|
0091973bca | ||
|
|
e734bbc0cc | ||
|
|
ce88e95032 | ||
|
|
e87e332d2f | ||
|
|
2e7609156a | ||
|
|
06313e0ec3 | ||
|
|
08d78e6be5 | ||
|
|
11964a8ce8 | ||
|
|
7d0aa7a336 | ||
|
|
39962623cc | ||
|
|
5cde87ce80 | ||
|
|
e918fbf9f2 | ||
|
|
e87ac449e6 | ||
|
|
6931f87fcd | ||
|
|
98fc43d859 | ||
|
|
624aecf72e | ||
|
|
6ab22c6e92 | ||
|
|
2b8403c7f7 | ||
|
|
5376743281 | ||
|
|
215abab544 | ||
|
|
bc99897fbb | ||
|
|
3b2fe1d8f6 | ||
|
|
aa31957d84 | ||
|
|
5a773ed62a | ||
|
|
47c17305cf | ||
|
|
c39a5e0de8 | ||
|
|
d87748c5dd | ||
|
|
63d3ac6679 | ||
|
|
4581f10207 | ||
|
|
da02236b3a | ||
|
|
37e12045fb | ||
|
|
9552fc0724 | ||
|
|
0f56c11101 | ||
|
|
6d59ef49f7 | ||
|
|
741dfaa2ea | ||
|
|
5fa8bd3c78 | ||
|
|
86331022eb | ||
|
|
bdfcc615ea | ||
|
|
05eac8631b | ||
|
|
7b537fd3b6 | ||
|
|
1075b76612 | ||
|
|
66faf8b4e4 | ||
|
|
8f17468fa3 | ||
|
|
7726e87cd7 | ||
|
|
34b412bdf8 | ||
|
|
8190869714 | ||
|
|
84134678dc | ||
|
|
9389b6e3ef | ||
|
|
9053eed4b4 | ||
|
|
b6b8855728 | ||
|
|
2c043f67d0 | ||
|
|
1ed438dcdb | ||
|
|
db6f526b78 | ||
|
|
07c818833a | ||
|
|
f86de2be78 | ||
|
|
5f859e4885 | ||
|
|
cc53fa4c14 | ||
|
|
a401eb5a3b | ||
|
|
69c5e0aae7 | ||
|
|
d3dc5e0df1 | ||
|
|
f98a8cc22f | ||
|
|
de4d66c56f | ||
|
|
f011b3cb22 | ||
|
|
8347c766f0 | ||
|
|
0a66f17897 | ||
|
|
f0b49995e5 | ||
|
|
21313e52b4 | ||
|
|
1303b07b72 | ||
|
|
7d37f7b634 | ||
|
|
9b74e60185 | ||
|
|
96597d3716 | ||
|
|
7e63cafc85 | ||
|
|
ee9d1ad26f | ||
|
|
277fd2f8eb | ||
|
|
f130bfd25c | ||
|
|
4562304a29 | ||
|
|
3972d740a6 | ||
|
|
7fbe2eba59 | ||
|
|
91d4ae46f6 | ||
|
|
79ebf07882 | ||
|
|
18a50aa679 | ||
|
|
4d6a1d4cc0 | ||
|
|
a8a2cf9bdb | ||
|
|
785b2f5d24 | ||
|
|
c7ed130cce | ||
|
|
bf8a16b0e1 | ||
|
|
546f9cb409 | ||
|
|
77113fbffd | ||
|
|
260b1ad887 | ||
|
|
97cc5b7f48 | ||
|
|
774e907ecf | ||
|
|
27d957f0cb | ||
|
|
6878f83677 | ||
|
|
c75969d7c8 | ||
|
|
6500a98747 | ||
|
|
6c93793750 | ||
|
|
e9058756b5 | ||
|
|
15299571f9 | ||
|
|
b2368e243c | ||
|
|
0d944ac87e | ||
|
|
730e0f7098 | ||
|
|
6e2d59e279 | ||
|
|
07049d6c76 | ||
|
|
eb97ef149a | ||
|
|
2198d8ba51 | ||
|
|
cd929b17a7 | ||
|
|
50e3b68586 | ||
|
|
6c0d5a3334 | ||
|
|
4fa7f98074 | ||
|
|
a7c2a7d4f9 | ||
|
|
62deb82bf8 | ||
|
|
10798cf2ae | ||
|
|
ac89a7d264 | ||
|
|
c429e930b2 | ||
|
|
2f01af0595 | ||
|
|
25ea3e4dc1 | ||
|
|
478f4687b1 | ||
|
|
89169d5506 | ||
|
|
fd429e4fda | ||
|
|
034e6b9a0b | ||
|
|
a784434e1f | ||
|
|
be7f45b3bf | ||
|
|
916ccca3a1 | ||
|
|
c7bf732fdf | ||
|
|
772d240782 | ||
|
|
155bc0534a | ||
|
|
c4c30a416a | ||
|
|
323587f10f | ||
|
|
298b4b8b5b | ||
|
|
aa90a48872 | ||
|
|
71481150c7 | ||
|
|
b2f6043181 | ||
|
|
387c3b317c | ||
|
|
07c281738a | ||
|
|
4ef38e1615 | ||
|
|
904a575f93 | ||
|
|
92a232b59b | ||
|
|
2621863d08 | ||
|
|
e34f0cc250 | ||
|
|
5cfd773ec9 | ||
|
|
c3165df156 | ||
|
|
2f1008e2c3 | ||
|
|
2d8da306a1 | ||
|
|
1672ec80d0 | ||
|
|
6c7e7325ec | ||
|
|
a69c4527a1 | ||
|
|
34a588511f | ||
|
|
a615df9282 | ||
|
|
949984db18 | ||
|
|
07c6f1714a | ||
|
|
c668165ca7 | ||
|
|
9f933b500b | ||
|
|
136f2d9549 | ||
|
|
799e476b48 | ||
|
|
952490a1b8 | ||
|
|
bfb834fb24 | ||
|
|
7faac4c241 | ||
|
|
040605a83c | ||
|
|
f936269a1e | ||
|
|
7bf64bc490 | ||
|
|
5d51f8c7a7 | ||
|
|
f802164cce | ||
|
|
e70936e654 | ||
|
|
15816eb07e | ||
|
|
ca6b3dfa1c | ||
|
|
bdeaf7e88c | ||
|
|
6198561d70 | ||
|
|
5d1cda9869 | ||
|
|
d4b8a0f2eb | ||
|
|
d77789d8fe | ||
|
|
4a4da858cf | ||
|
|
d666fc3f8f | ||
|
|
473f1cb4d2 | ||
|
|
6d51987e67 | ||
|
|
59b989d243 | ||
|
|
63ecf009ec | ||
|
|
961116f4d5 | ||
|
|
37f3c0926c | ||
|
|
9a524fb838 | ||
|
|
b5dc78b06e | ||
|
|
b506c92d21 | ||
|
|
cdb50beaa2 | ||
|
|
20845eb117 | ||
|
|
ddfe782151 | ||
|
|
c06ec92d0d | ||
|
|
01432670fd | ||
|
|
7bc1a41814 | ||
|
|
b19937c4dc | ||
|
|
eef436b889 | ||
|
|
e618b1e14b | ||
|
|
ea1473cbe8 | ||
|
|
be21d190e2 | ||
|
|
189459f2b9 | ||
|
|
98c56c214a | ||
|
|
2951cad365 | ||
|
|
9a135c48d9 | ||
|
|
02dc2f460e | ||
|
|
ea94658411 | ||
|
|
d10c0d9545 | ||
|
|
8766e44b95 | ||
|
|
5012ba34b4 | ||
|
|
7022163e87 | ||
|
|
90cc7b25d4 | ||
|
|
040ef0bc49 | ||
|
|
7642bdd73b | ||
|
|
65768c20ae | ||
|
|
a448ccf20c | ||
|
|
d5b7c51e40 | ||
|
|
2d2d14744b | ||
|
|
f61f1a2020 | ||
|
|
31ee2951ce | ||
|
|
d46a54348a | ||
|
|
4a53ed1201 | ||
|
|
d2e57cfcac | ||
|
|
e172d7f1a9 | ||
|
|
3bc4788acb | ||
|
|
4faff1a63c | ||
|
|
4c330bc38b | ||
|
|
67d1985550 | ||
|
|
b94b89ba68 | ||
|
|
bec6662338 | ||
|
|
42c148bf75 | ||
|
|
d6dfbcd743 | ||
|
|
be813ea0a2 | ||
|
|
c751e44c6c | ||
|
|
eaa483d6e4 | ||
|
|
672d43a6cf | ||
|
|
9b19f0aaba | ||
|
|
218d2892e8 | ||
|
|
35a4d1b3a2 | ||
|
|
f6fa63bdef | ||
|
|
f3ff1fcbeb | ||
|
|
935800d7f6 | ||
|
|
726fc93634 | ||
|
|
8b972f2ed6 | ||
|
|
ef9e212eec | ||
|
|
18d6986c22 | ||
|
|
19f73b2ede | ||
|
|
b4102a4510 | ||
|
|
e192e8ea9e | ||
|
|
18f90e6339 | ||
|
|
406aad78fe | ||
|
|
280f3515b5 | ||
|
|
522daa26a6 | ||
|
|
bb46b561fd | ||
|
|
828a5d45cd | ||
|
|
0f7ac1cc90 | ||
|
|
019ff4709c | ||
|
|
3fd9d5a025 | ||
|
|
924858509d | ||
|
|
6f200d310f | ||
|
|
321b087039 | ||
|
|
357f115f11 | ||
|
|
5531586c35 | ||
|
|
40da411fa5 | ||
|
|
676c367db1 | ||
|
|
5722d17924 | ||
|
|
97298eb112 | ||
|
|
b4df3663a9 | ||
|
|
86567ba96f | ||
|
|
0cf7043f5f | ||
|
|
0df23312ac | ||
|
|
e8fc32a7dc | ||
|
|
e4ee1692fb | ||
|
|
4bc8c79bd3 | ||
|
|
915dc4be7f | ||
|
|
cf2116e167 | ||
|
|
64762c5acd | ||
|
|
4f9b8ebc73 | ||
|
|
ee61a265f4 | ||
|
|
db4abfe198 | ||
|
|
4b7dbbf43b | ||
|
|
db13639460 | ||
|
|
1cdb4c21c2 | ||
|
|
008d9371b1 | ||
|
|
6640dd0a6c | ||
|
|
589538bf39 | ||
|
|
8d4dd13750 | ||
|
|
754a3208f2 | ||
|
|
4d278a654e | ||
|
|
9eaec6d58a | ||
|
|
616cd3316c | ||
|
|
4579d339ea | ||
|
|
36f584d341 | ||
|
|
6387bbf193 | ||
|
|
de177e7529 | ||
|
|
f17fdfdbef | ||
|
|
45d6bf196a | ||
|
|
5e2fff91f8 | ||
|
|
e13f8dd359 | ||
|
|
0897f3c1c4 | ||
|
|
a82368956e | ||
|
|
6841d8ba9a | ||
|
|
927da8e861 | ||
|
|
3c220a2813 | ||
|
|
5a01819fdc | ||
|
|
4977933d81 | ||
|
|
953e467a85 | ||
|
|
131ab07c2b | ||
|
|
1d94667a15 | ||
|
|
4421fb7e19 | ||
|
|
131ff50333 | ||
|
|
bc8b5b3896 | ||
|
|
a2d12517e7 | ||
|
|
95d6647dce | ||
|
|
3454f51d2c | ||
|
|
6b65f6d9f4 | ||
|
|
c93e71698e | ||
|
|
f9d2ede83c | ||
|
|
53bf4573f0 | ||
|
|
86652738c0 | ||
|
|
294fb039fe | ||
|
|
f12826bac5 | ||
|
|
2c8afde6d9 | ||
|
|
1445202e0e | ||
|
|
1b940fd41e | ||
|
|
f1fc3bdfba | ||
|
|
7aa37b19a9 | ||
|
|
967a49dd66 | ||
|
|
25df23fed3 | ||
|
|
e162d5a99d | ||
|
|
918ca339b6 | ||
|
|
8bb8f0eda4 | ||
|
|
be2cc8f946 | ||
|
|
599ada8354 | ||
|
|
83b9cc5c0a | ||
|
|
af75afeb7a | ||
|
|
42e181112a | ||
|
|
801f78f8a8 | ||
|
|
e100040f28 | ||
|
|
b8a39a1b26 | ||
|
|
8f768633ad | ||
|
|
65ea6fd48a | ||
|
|
d2c7b356cc | ||
|
|
af58955140 | ||
|
|
ffc9a33933 | ||
|
|
fbab9874f6 | ||
|
|
017e7890f7 | ||
|
|
48644813d4 | ||
|
|
c81821ed28 | ||
|
|
42cfe97427 | ||
|
|
09a2c12ea0 | ||
|
|
a0f6f264f6 | ||
|
|
6f9cea5b58 | ||
|
|
dd4ac42491 | ||
|
|
01df6ed4a9 | ||
|
|
e71259006c | ||
|
|
0f161b500f | ||
|
|
e442139c39 | ||
|
|
8b0f871c06 | ||
|
|
61fab0340c | ||
|
|
525eacd035 | ||
|
|
cddddfd255 | ||
|
|
780e9f31fe | ||
|
|
c0b54aa58c | ||
|
|
c0c1cc1ba7 | ||
|
|
dededd1929 | ||
|
|
6cd8a8f895 | ||
|
|
d3ade0654e | ||
|
|
2dd7128db5 | ||
|
|
1f13a236bf | ||
|
|
ca1dbc3d3b | ||
|
|
74db8cbab3 | ||
|
|
62bc39e600 | ||
|
|
268be7f0b5 | ||
|
|
55bf0d23c2 | ||
|
|
f433aa3ad5 | ||
|
|
f587e0a459 | ||
|
|
76bfcc29c2 | ||
|
|
1d91a626f2 | ||
|
|
dbde936c3c | ||
|
|
cf679187b1 | ||
|
|
fd17ad236a | ||
|
|
dbb96c1885 | ||
|
|
4cd7e10ad3 | ||
|
|
3fd76d59ea | ||
|
|
f445a470df | ||
|
|
4e5299a9bf | ||
|
|
a6afef9f3f | ||
|
|
6a1fb8ea31 | ||
|
|
f2c66dc4c3 | ||
|
|
a91c8e15e2 | ||
|
|
9c7a842163 | ||
|
|
3dd6173a65 | ||
|
|
e9bc2b7b54 | ||
|
|
38947ab71b | ||
|
|
8a7801264a | ||
|
|
66edc180be | ||
|
|
17809992d7 | ||
|
|
c10033211b | ||
|
|
7d4ea1b6f0 | ||
|
|
0dfe823c32 | ||
|
|
bef275f62c | ||
|
|
edb4c57e3d | ||
|
|
c1b3face8f | ||
|
|
9d23c10475 | ||
|
|
78d509dba5 | ||
|
|
1a37135f98 | ||
|
|
dbd0581cb3 | ||
|
|
946530019a | ||
|
|
8f6f6d10e7 | ||
|
|
3a549e5c2f | ||
|
|
dc7015c5f2 | ||
|
|
356e9c6810 | ||
|
|
d0ddec469a | ||
|
|
87de3a2d06 | ||
|
|
eb8e1a2160 | ||
|
|
ce5f8cd46f | ||
|
|
b2f62d51b0 | ||
|
|
b3c68af40a | ||
|
|
348100ba42 | ||
|
|
426afc7377 | ||
|
|
b04caabf39 | ||
|
|
32cfdd52d3 | ||
|
|
015af03bdc | ||
|
|
99946ae0e6 | ||
|
|
a910bfb539 | ||
|
|
cb7cbec0d5 | ||
|
|
cb6614da42 | ||
|
|
6fae459847 | ||
|
|
bc7c54e5c8 | ||
|
|
ab835d8086 | ||
|
|
0f7050d3aa | ||
|
|
2cef101022 | ||
|
|
0a069f7de2 | ||
|
|
dcf9c280ee | ||
|
|
a2fd124997 | ||
|
|
63917f8cc2 | ||
|
|
e7ddb9e642 | ||
|
|
ec17082864 | ||
|
|
9ce6fbe1fa | ||
|
|
8db3d25844 | ||
|
|
f9ec1a0097 | ||
|
|
61c33969a2 | ||
|
|
85eb4cf0d6 | ||
|
|
47cc470bf6 | ||
|
|
60881499dc | ||
|
|
f19cf9274e | ||
|
|
ed9bca0e12 | ||
|
|
f15cdc03e3 | ||
|
|
b31fc6f66d | ||
|
|
8baef6daa3 | ||
|
|
ac700d4860 | ||
|
|
b2baa35c3d | ||
|
|
b50d3944ea | ||
|
|
f115a32073 | ||
|
|
57aefdf830 | ||
|
|
0d48ace15e | ||
|
|
362ee06b9f | ||
|
|
9004f090c5 | ||
|
|
6948120094 | ||
|
|
6585a925be | ||
|
|
e682b19eda | ||
|
|
8ee3178166 | ||
|
|
09a6e37154 | ||
|
|
1b8e745ffe | ||
|
|
6a1952d1f9 | ||
|
|
9ba7b96825 | ||
|
|
ef5d2dc043 | ||
|
|
1f0f852fda | ||
|
|
a166eb7ea1 | ||
|
|
02681d531e | ||
|
|
641d882ea6 | ||
|
|
1e80b3b0d7 | ||
|
|
ff98271a43 | ||
|
|
500ad7fb51 | ||
|
|
4f486333ed | ||
|
|
9a677b62ab | ||
|
|
68ced6ce46 | ||
|
|
1bd5360d3b | ||
|
|
a7aa1ac1cf | ||
|
|
ae23320417 | ||
|
|
b68a751f4e | ||
|
|
8391d05697 | ||
|
|
63a3214cc6 | ||
|
|
4382902894 | ||
|
|
103ef25f12 | ||
|
|
84a7a5d1cb | ||
|
|
ac095dbf3e | ||
|
|
a508bd4290 | ||
|
|
c1de753db6 | ||
|
|
621679245a | ||
|
|
58aea1b61c | ||
|
|
9b5ee1b31b | ||
|
|
383e804ec1 | ||
|
|
34f7986e19 | ||
|
|
c5fc47cc19 | ||
|
|
814ee67519 | ||
|
|
43761173ec | ||
|
|
d2b89e0e37 | ||
|
|
c4ad8f6ed4 | ||
|
|
4d289b16c2 | ||
|
|
e6c8765891 | ||
|
|
f89bad1e94 | ||
|
|
ade8751442 | ||
|
|
f97a2d68c8 | ||
|
|
899f85ce9c | ||
|
|
78a05777bc | ||
|
|
e436e33771 | ||
|
|
faa69dc71f | ||
|
|
d72590ede6 | ||
|
|
c378429ffb | ||
|
|
ebe1831e71 | ||
|
|
8cdc70a5b9 | ||
|
|
6244902931 | ||
|
|
1bd5b704c6 | ||
|
|
2117002c01 | ||
|
|
0bd519da49 | ||
|
|
2e724b095e | ||
|
|
1710b9171f | ||
|
|
599153d86b | ||
|
|
fb1a2a0a40 | ||
|
|
7e9b4c0924 | ||
|
|
96112f9d45 | ||
|
|
496845df60 | ||
|
|
8808a8cc9c | ||
|
|
ba4977cd84 | ||
|
|
e751977b72 | ||
|
|
5409fc2e19 | ||
|
|
a4d6240ab4 | ||
|
|
78a26c8743 | ||
|
|
a45e147d38 | ||
|
|
477a688c68 | ||
|
|
d129e33b51 | ||
|
|
17b54cb0c8 | ||
|
|
99df2d9dbf | ||
|
|
8297edd251 | ||
|
|
b43861b467 | ||
|
|
92773ada6d | ||
|
|
e3db1091a8 | ||
|
|
7abbe97ee9 | ||
|
|
c7606bb93b | ||
|
|
0d9594354a | ||
|
|
c18fa15db1 | ||
|
|
44912e6b1e | ||
|
|
5d56d29240 | ||
|
|
0439605e94 | ||
|
|
1485419414 | ||
|
|
3da152a150 | ||
|
|
4222605f87 | ||
|
|
1b196520f6 | ||
|
|
10223cfac3 | ||
|
|
8b0fc558cb | ||
|
|
8a6789ef61 | ||
|
|
57f019a6e0 | ||
|
|
32c77be2f3 | ||
|
|
ac79f3f345 | ||
|
|
d45c5767d8 | ||
|
|
2741e3c1d0 | ||
|
|
dc8895352a | ||
|
|
959a741c14 | ||
|
|
af95188f29 | ||
|
|
c7b4164122 | ||
|
|
4a908cf38d | ||
|
|
b2cf0209b1 | ||
|
|
2d6f7a7c93 | ||
|
|
660f14245b | ||
|
|
49cefc2e97 | ||
|
|
7942bdb728 | ||
|
|
758efebb3c | ||
|
|
efad72fef4 | ||
|
|
3b4cc90800 | ||
|
|
8a39af8f72 | ||
|
|
ecb1174a18 | ||
|
|
39c2274f1a | ||
|
|
570246a016 | ||
|
|
8e71dbd6c1 | ||
|
|
da52ae844f | ||
|
|
396aaae098 | ||
|
|
5855ae7460 | ||
|
|
2a83cefd5b | ||
|
|
dfc723bc19 | ||
|
|
23cb59427d | ||
|
|
1c858c34f7 | ||
|
|
9f7a4aa867 | ||
|
|
4285b5a89e | ||
|
|
a80696f98f | ||
|
|
af5da885a5 | ||
|
|
349062d89c | ||
|
|
5a9b3b3abb | ||
|
|
ea50609829 | ||
|
|
019043f55e | ||
|
|
6319a84ded | ||
|
|
8eeb446f74 | ||
|
|
1913199a45 | ||
|
|
4b26b8b430 | ||
|
|
9e0e9dbecc | ||
|
|
87cecddabb | ||
|
|
ddecdeb834 | ||
|
|
5cabf0bef0 | ||
|
|
0647c02561 | ||
|
|
491c58aef3 | ||
|
|
28737f7ab4 | ||
|
|
0ed89e93fa | ||
|
|
0663cb41ff | ||
|
|
b2678b4338 | ||
|
|
d68dbbc7bc | ||
|
|
0c274212c2 | ||
|
|
dbef4fd7d7 | ||
|
|
43245bbc11 | ||
|
|
971d5f8d12 | ||
|
|
6a21218c13 | ||
|
|
bb8f7d4e3f | ||
|
|
67362935f4 | ||
|
|
3b8c7c9126 | ||
|
|
5f20dd01e8 | ||
|
|
a5782dfb2d | ||
|
|
1c527ae34c | ||
|
|
f45f6cb32a | ||
|
|
00cd90c6b0 | ||
|
|
406e2eb8d0 | ||
|
|
3cf63362a4 | ||
|
|
e4b1f58595 | ||
|
|
4b1a0b4bc4 | ||
|
|
922edb1128 | ||
|
|
0672289cb9 | ||
|
|
ff547fc6cb | ||
|
|
fa78d548cc | ||
|
|
191d9dede5 | ||
|
|
6e0b9ddc74 | ||
|
|
eef4e11768 | ||
|
|
6fd21d988d | ||
|
|
2332490481 | ||
|
|
e2a91e6de5 | ||
|
|
b258027061 | ||
|
|
97068765e8 | ||
|
|
ec5cbb8117 | ||
|
|
ce99b17616 | ||
|
|
06aa2067d9 | ||
|
|
36886971e3 | ||
|
|
9861375f0c | ||
|
|
ed825b3773 | ||
|
|
a9913c8337 | ||
|
|
a97eb7b7cb | ||
|
|
715b828266 | ||
|
|
40af8d6ed5 | ||
|
|
059d80cc11 | ||
|
|
7364e06387 | ||
|
|
efc20c2110 | ||
|
|
19379db3b6 | ||
|
|
9cf8e8cbf3 | ||
|
|
7a32699573 | ||
|
|
320c41ffcf | ||
|
|
9c79d4d182 | ||
|
|
582cfe55b6 | ||
|
|
8db378b265 | ||
|
|
71a7564317 | ||
|
|
c14b035a46 | ||
|
|
cf2eaa0014 | ||
|
|
cb92d54808 | ||
|
|
97d430d5cd | ||
|
|
320b1700ff | ||
|
|
e06267ef1b | ||
|
|
501a23ad20 | ||
|
|
c1cc80b1d5 | ||
|
|
28080b0c22 | ||
|
|
be3a40e70b | ||
|
|
5d8ebf3ca1 | ||
|
|
443987f536 | ||
|
|
f6ce969d9f | ||
|
|
f620cdbaa1 | ||
|
|
090dec8549 | ||
|
|
3f2217646e | ||
|
|
611477e214 | ||
|
|
9bb5c314cd | ||
|
|
f31a31478b | ||
|
|
5fb30939be | ||
|
|
60b413a9cb | ||
|
|
3e9d784013 | ||
|
|
0452b77169 | ||
|
|
10b8c481f5 | ||
|
|
502f8fd76b | ||
|
|
2b2905b567 | ||
|
|
e7f067d70c | ||
|
|
d976da7559 | ||
|
|
84dbd66d10 | ||
|
|
6be3c24ee5 | ||
|
|
42f31aed69 | ||
|
|
ed017c42f1 | ||
|
|
4766467271 | ||
|
|
ea8591a85a | ||
|
|
7ab4c5391c | ||
|
|
0c5742b6f8 | ||
|
|
1d76f74b16 | ||
|
|
5e5d42b918 | ||
|
|
cd9afe946c | ||
|
|
1276ea9844 | ||
|
|
0755e4f8ff | ||
|
|
ccdbddd388 | ||
|
|
5b20b06bd9 | ||
|
|
dff7735af9 | ||
|
|
fb34fc5a85 | ||
|
|
43423c276f | ||
|
|
5ffc3a8f4c | ||
|
|
3c06924a02 | ||
|
|
a174a90f86 | ||
|
|
4f48d3258a | ||
|
|
d9c38b5c1f | ||
|
|
d3c567503b | ||
|
|
d7562d3836 | ||
|
|
220f0b0b40 | ||
|
|
48ff03112f | ||
|
|
ab3b633733 | ||
|
|
fa93cb7d0b | ||
|
|
153fbc3d7d | ||
|
|
307abc8db7 | ||
|
|
af61c9bae3 | ||
|
|
67b549a937 | ||
|
|
b6df447b55 | ||
|
|
2d063925a1 | ||
|
|
bba84f247c | ||
|
|
780b0dfe47 | ||
|
|
04d61afa23 | ||
|
|
663ebf7857 | ||
|
|
53414f12e6 | ||
|
|
15a3ef370a | ||
|
|
c14659c675 | ||
|
|
f7f281a256 | ||
|
|
9ba49eabb2 | ||
|
|
e7abf3f2ea | ||
|
|
83e1630fbc | ||
|
|
0277ba1aaa | ||
|
|
753c001e69 | ||
|
|
10c0b42d0d | ||
|
|
564e61c828 | ||
|
|
946c39a5df | ||
|
|
2948e84846 | ||
|
|
068fd8098c | ||
|
|
d7b0c5794e | ||
|
|
b007bba59f | ||
|
|
abf43ad01d | ||
|
|
922895de69 | ||
|
|
28f0bce9f2 | ||
|
|
0f82f216a2 | ||
|
|
7454b1399c | ||
|
|
4ebf46bd63 | ||
|
|
f1cce0ef5f | ||
|
|
8c9e873c10 | ||
|
|
c85439e7bb | ||
|
|
fd7f87b55e | ||
|
|
8be4128c5a | ||
|
|
806e37338c | ||
|
|
ec1095624a | ||
|
|
a23d69ebe8 | ||
|
|
0aff61ffc6 | ||
|
|
05aa540984 | ||
|
|
033e83e490 | ||
|
|
594485c38c | ||
|
|
d52e2d5a8d | ||
|
|
1e5d852e2f | ||
|
|
cc32d913a0 | ||
|
|
fc66066d4d | ||
|
|
6169338815 | ||
|
|
86ee8db778 | ||
|
|
6bc8cb1ff1 | ||
|
|
0fc49b1c37 | ||
|
|
9fb981e9a0 | ||
|
|
cba1b3cedd | ||
|
|
12c4512932 | ||
|
|
f2452f040d | ||
|
|
0dd1dbb568 | ||
|
|
fdcec5a219 | ||
|
|
bebab7ab0d | ||
|
|
fb771b6aa3 | ||
|
|
8156559475 | ||
|
|
9f5e51cd01 | ||
|
|
27daab2f1b | ||
|
|
c4d404b15f | ||
|
|
95fcdc36ee | ||
|
|
2fdaba53c1 | ||
|
|
5c89080469 | ||
|
|
d92f9df17c | ||
|
|
f551390420 | ||
|
|
8642b4d89f | ||
|
|
6fb70c307d | ||
|
|
d08346fbcf | ||
|
|
141d240a91 | ||
|
|
cf9ceb6bf9 | ||
|
|
7589ae0de5 | ||
|
|
f46e5b37e9 | ||
|
|
560acd5017 | ||
|
|
2267f278d2 | ||
|
|
0feeef585c | ||
|
|
6211966c55 | ||
|
|
92f591b4bd | ||
|
|
29ceb42b7b | ||
|
|
adaabe5993 | ||
|
|
6c392ee4a1 | ||
|
|
7699eda5ba | ||
|
|
d8b5fd5409 | ||
|
|
b37ffdbe85 | ||
|
|
481bcc732b | ||
|
|
ce175aee4c | ||
|
|
50896b373b | ||
|
|
1a40f936df | ||
|
|
1024ba9b0f | ||
|
|
1a7ac8b804 | ||
|
|
7bedb4a081 | ||
|
|
630215f56f | ||
|
|
6f0e5fd402 | ||
|
|
66ec43739a | ||
|
|
44f9d1ed78 | ||
|
|
c6d479b8ad | ||
|
|
80e2f4e342 | ||
|
|
4b388edca9 | ||
|
|
5362dade37 | ||
|
|
3d24265d50 | ||
|
|
7057fc2930 | ||
|
|
e661ee95ff | ||
|
|
333b89fa07 | ||
|
|
04e888548e | ||
|
|
403d9e1059 | ||
|
|
4ea02c59d8 | ||
|
|
de7ba7a55b | ||
|
|
23ba61e76f | ||
|
|
c0aa7e0314 | ||
|
|
9f44e597d6 | ||
|
|
60c5bef90f | ||
|
|
a38fcf1127 | ||
|
|
f22e237381 | ||
|
|
b6b9daa3c5 | ||
|
|
d958b0b9d6 | ||
|
|
7b2eaf63af | ||
|
|
aef20b536a | ||
|
|
1a34b1410f | ||
|
|
5af2f80bc5 | ||
|
|
4d241736f0 | ||
|
|
a47460b4c3 | ||
|
|
32be338f60 | ||
|
|
549655bff4 | ||
|
|
3e18cec691 | ||
|
|
658dd3486b | ||
|
|
018e9a12a3 | ||
|
|
2027a6ac12 | ||
|
|
26bec62daf | ||
|
|
7497e86902 | ||
|
|
e084f1c311 | ||
|
|
95950885cf | ||
|
|
9cd84aeea9 | ||
|
|
d324ec247e | ||
|
|
cbb0d6ce06 | ||
|
|
d36ab4cc3c | ||
|
|
1069a3c77e | ||
|
|
36da23c9c5 | ||
|
|
e756daa261 | ||
|
|
65ac336211 | ||
|
|
14fe987956 | ||
|
|
c5acf239f2 | ||
|
|
a02500b112 | ||
|
|
8fea85a85c | ||
|
|
6f42bfc640 | ||
|
|
6b8741dbd7 | ||
|
|
ce5d4b6ccc | ||
|
|
eb2e5f378c | ||
|
|
9251ea2b22 | ||
|
|
cb650d6100 | ||
|
|
3f3d9219ea | ||
|
|
11528b0def | ||
|
|
a5f0e713d6 | ||
|
|
de3a864bd6 | ||
|
|
af551b3c09 | ||
|
|
5d64d23b61 | ||
|
|
269a00f9ec | ||
|
|
29c482789f | ||
|
|
a0462fe1ee | ||
|
|
78a840f48d | ||
|
|
7371d82bdf | ||
|
|
4c35d9456a | ||
|
|
0704081d91 | ||
|
|
5898532605 | ||
|
|
603abf70dc | ||
|
|
0a3822f2e5 | ||
|
|
f76eb2b7f5 | ||
|
|
08bbf5f5ef | ||
|
|
4ea08116b8 | ||
|
|
78e03c6402 | ||
|
|
4ab89de343 | ||
|
|
6db460fb81 | ||
|
|
be859df51e | ||
|
|
3c533f2ba4 | ||
|
|
9cf037c90f | ||
|
|
9e0425e824 | ||
|
|
2960479095 | ||
|
|
baf1ff033a | ||
|
|
52dcbf087a | ||
|
|
d1c6331924 | ||
|
|
0af2a13349 | ||
|
|
7f0c92eb4d | ||
|
|
0f86255279 | ||
|
|
b2517c8a18 | ||
|
|
95d0c5e67b | ||
|
|
0885b2bf23 | ||
|
|
706337bb5a | ||
|
|
3f8a678c5a | ||
|
|
0f631ad49b | ||
|
|
5bc3b4f768 | ||
|
|
e8bd464ab2 | ||
|
|
ef1af547e2 | ||
|
|
f2dcad27bb | ||
|
|
01992006b2 | ||
|
|
487820fb4d | ||
|
|
d760d67598 | ||
|
|
3cb827ac56 | ||
|
|
15c5dfc12b | ||
|
|
524939dc5b | ||
|
|
51fdff208e | ||
|
|
65a309648b | ||
|
|
7d08eeb8dd | ||
|
|
9e0428ba0d | ||
|
|
15461a2460 | ||
|
|
88f21b5c57 | ||
|
|
bee3029764 | ||
|
|
150d6d1f56 | ||
|
|
d03e4ac100 | ||
|
|
8d8d9c63fe | ||
|
|
52147ce631 | ||
|
|
5af41df8a5 | ||
|
|
775ecd6dfe | ||
|
|
b139235c62 | ||
|
|
8f2c910600 | ||
|
|
7e67f01d4b | ||
|
|
ad7e800446 | ||
|
|
6326924de7 | ||
|
|
801a3a6ff5 | ||
|
|
a4e94a26ba | ||
|
|
44e6be7914 | ||
|
|
3aaf2ef2d4 | ||
|
|
8f902fde9c | ||
|
|
b6023c517e | ||
|
|
42d77e9191 | ||
|
|
312a7582df | ||
|
|
dc939eba78 | ||
|
|
f8bec51de2 | ||
|
|
0bf1320a32 | ||
|
|
81dbd504aa | ||
|
|
63dd7d9859 | ||
|
|
2063d34f3e | ||
|
|
83fdc2e5ad | ||
|
|
6ba7368ab0 | ||
|
|
c2805942a9 | ||
|
|
9892c8bf9a | ||
|
|
23e5877509 | ||
|
|
8cbfde6092 | ||
|
|
24087ff3cc | ||
|
|
6827001c1d | ||
|
|
16b0806d40 | ||
|
|
2129b1e27d | ||
|
|
a267762f59 | ||
|
|
65ca795030 | ||
|
|
e82b649ec0 | ||
|
|
275cdb1713 | ||
|
|
d3b86dcc90 | ||
|
|
c736b75075 | ||
|
|
b601331362 | ||
|
|
32d44a5b9e | ||
|
|
810784da1f | ||
|
|
d517b37f3f | ||
|
|
adeef0af01 | ||
|
|
97ddc1ed10 | ||
|
|
bf580648a1 | ||
|
|
ecc54fa0eb | ||
|
|
04d32ae3e6 | ||
|
|
a18b3ae88e | ||
|
|
e57801a5d1 | ||
|
|
da4390aede | ||
|
|
9e85667219 | ||
|
|
b80867d473 | ||
|
|
d742dcce59 | ||
|
|
7a7af3d5f9 | ||
|
|
e323b1d0ad | ||
|
|
3c18c7a713 | ||
|
|
7c16292cb7 | ||
|
|
d665e2e85b | ||
|
|
172a189c6f | ||
|
|
406fbab40e | ||
|
|
09dc217f8c | ||
|
|
9002837750 | ||
|
|
411d5b44ef | ||
|
|
360cc8044e | ||
|
|
ec2e9b5e79 | ||
|
|
881dba61e4 | ||
|
|
6412876f64 | ||
|
|
538d51cbfe | ||
|
|
3dd9ff3d84 | ||
|
|
7f386923b0 | ||
|
|
d2312b1fbd | ||
|
|
6655373ac3 | ||
|
|
d492af7bc0 | ||
|
|
230a7b7374 | ||
|
|
4204a752f7 | ||
|
|
0e88d5f97f | ||
|
|
a13e7f2435 | ||
|
|
be2108260e | ||
|
|
59b0a2b208 | ||
|
|
05a5a42a08 | ||
|
|
f0b0618484 | ||
|
|
4ecdbe4bd9 | ||
|
|
9e9f266e52 | ||
|
|
0ce67f37ac | ||
|
|
ddcd0a49ec | ||
|
|
63b8fac852 | ||
|
|
def8d7850b | ||
|
|
0442efc856 | ||
|
|
f928bbb53c | ||
|
|
1ab7500dbb | ||
|
|
c58d92d46b | ||
|
|
8276e912fd | ||
|
|
e0490d0df5 | ||
|
|
11db466a88 | ||
|
|
caaee0b666 | ||
|
|
f2f470f369 | ||
|
|
09bb36f58c | ||
|
|
21719df6fd | ||
|
|
39329809dd | ||
|
|
44797e2925 | ||
|
|
c8f373d119 | ||
|
|
8a22c63889 | ||
|
|
1a4434d314 | ||
|
|
165a13b13e | ||
|
|
43364b2d69 | ||
|
|
6eaecd20d5 | ||
|
|
c80bfeacf6 | ||
|
|
2a19cc1758 | ||
|
|
8f5189f606 | ||
|
|
49dde7c6f2 | ||
|
|
765a0d8896 | ||
|
|
19d8f2e258 | ||
|
|
e6aec96e05 | ||
|
|
a2d42c3242 | ||
|
|
52836aae87 | ||
|
|
bda566d6a7 | ||
|
|
63ed90b0fd | ||
|
|
0bb4d282e2 | ||
|
|
ae89a65dad | ||
|
|
e9fe9f5043 | ||
|
|
ce8dc5927c | ||
|
|
f6989cce38 | ||
|
|
6dbbf9aa80 | ||
|
|
fe6282e837 | ||
|
|
51210a869b | ||
|
|
658652a9ff | ||
|
|
aecd6e0878 | ||
|
|
1334a84861 | ||
|
|
6a410fc30e | ||
|
|
984a68c3a9 | ||
|
|
daf5aa8e8b | ||
|
|
98b2e0e426 | ||
|
|
9a1932eaf7 | ||
|
|
371d4be8ef | ||
|
|
d180031ef0 | ||
|
|
e09e953bbb | ||
|
|
2c640f7e52 | ||
|
|
2bacebb1fb | ||
|
|
df18b2a150 | ||
|
|
216ac4b1a4 | ||
|
|
898cded646 | ||
|
|
c09c87873e | ||
|
|
10b79fb41b | ||
|
|
ec0280be11 | ||
|
|
8e19d54e75 | ||
|
|
3c070e5e20 | ||
|
|
dde599f48f | ||
|
|
cc15ecfb3a | ||
|
|
7a7c54bd59 | ||
|
|
bea88ab122 | ||
|
|
926b3b9ee3 | ||
|
|
bc7775aef2 | ||
|
|
107669686c | ||
|
|
bb11b3ab66 | ||
|
|
516ba85abd | ||
|
|
098277b4f0 | ||
|
|
950a989744 | ||
|
|
fb8b893b10 | ||
|
|
9ca80debb8 | ||
|
|
080241b7d1 | ||
|
|
0d534720bb | ||
|
|
1dc4424a30 | ||
|
|
57f0cf30c0 | ||
|
|
8ef6bc1636 | ||
|
|
974b40c8af | ||
|
|
45e9e0be0b | ||
|
|
ec0918045d | ||
|
|
38bcecd2f3 | ||
|
|
aabbdba068 | ||
|
|
84c183da1f | ||
|
|
b363b98211 | ||
|
|
8defbeb248 | ||
|
|
f52d227d80 | ||
|
|
78cb45fb25 | ||
|
|
2d8026625b | ||
|
|
73afab464f | ||
|
|
8aa139b6be | ||
|
|
e5fe0eabdc | ||
|
|
0d3993fa25 | ||
|
|
ac421f68e2 | ||
|
|
b9d1f0db18 | ||
|
|
6aad4c7a39 | ||
|
|
4186ef204d | ||
|
|
ae7a094ee0 | ||
|
|
3a007f939a | ||
|
|
b8503b9255 | ||
|
|
b7bc76d3cc | ||
|
|
27d6c12972 | ||
|
|
b69d783e09 | ||
|
|
3b2ff6301c | ||
|
|
6c7043916e | ||
|
|
96a6e75b71 | ||
|
|
a91e4e7981 | ||
|
|
95d8f76ec3 | ||
|
|
66d4c2ddd9 | ||
|
|
8115ca739a | ||
|
|
ec4021bbf4 | ||
|
|
e431b07e04 | ||
|
|
d34a87404d | ||
|
|
f38770bf2a | ||
|
|
dc9998ccaf | ||
|
|
f1b3703389 | ||
|
|
b6a8d0ee7f | ||
|
|
2a4dff38d0 | ||
|
|
665c564dcf | ||
|
|
ed71413e04 | ||
|
|
4b5e49b00b | ||
|
|
f558ee788e | ||
|
|
ceb8ca680c | ||
|
|
79ebcbec4b | ||
|
|
2c7b650240 | ||
|
|
54459255d4 | ||
|
|
b4a078e2f6 | ||
|
|
ed13dd066b | ||
|
|
2b4a3b22bf | ||
|
|
8b891da628 | ||
|
|
5a2c8342eb | ||
|
|
50eb4bf53a | ||
|
|
3c10ddd46a | ||
|
|
0b7f9acc70 | ||
|
|
10fbaec247 | ||
|
|
007a734595 | ||
|
|
46716aada3 | ||
|
|
3bc66136b2 | ||
|
|
fae47e0dfc | ||
|
|
bd52e86486 | ||
|
|
b2f6ed7209 | ||
|
|
4b334fd2e2 | ||
|
|
a23a7006e3 | ||
|
|
f47171a17c | ||
|
|
4945dc3682 | ||
|
|
ada66b5313 | ||
|
|
96450e17a3 | ||
|
|
40a295e951 | ||
|
|
d6c6f95373 | ||
|
|
19b46be20d | ||
|
|
789e04ce90 | ||
|
|
dd4f0a600b | ||
|
|
6c7df4cb6b | ||
|
|
79e0a9f32a | ||
|
|
6c9bc63a1c | ||
|
|
28a821df7d | ||
|
|
27e39954d6 | ||
|
|
e730a5364b | ||
|
|
92b3ae41dd | ||
|
|
89a2566e01 | ||
|
|
1ac3e03171 | ||
|
|
b86d40091a | ||
|
|
91d22d150f | ||
|
|
1d29991268 | ||
|
|
6f0a2686dc | ||
|
|
f06caabb07 | ||
|
|
3c869802fb | ||
|
|
7b6bd90903 | ||
|
|
967bfa9c92 | ||
|
|
592affb984 | ||
|
|
96aaf6d53b | ||
|
|
1397dbdabc | ||
|
|
6118643232 | ||
|
|
71198a0b54 | ||
|
|
22cb80399f | ||
|
|
fa1fd8a576 | ||
|
|
6df7d31a5b | ||
|
|
ef049e92ef | ||
|
|
fe8b109ca5 | ||
|
|
8fd9b84a80 | ||
|
|
5cb53f52c3 | ||
|
|
d86653668e | ||
|
|
5084712a15 | ||
|
|
ece65cab18 | ||
|
|
1f6075506c | ||
|
|
51ade48e3d | ||
|
|
21c43737fe | ||
|
|
6c7bcf00e7 | ||
|
|
7a2142075c | ||
|
|
e8e9baa417 | ||
|
|
449d956966 | ||
|
|
90db01d038 | ||
|
|
38cea6dc71 | ||
|
|
64807dfb3b | ||
|
|
d943455e10 | ||
|
|
fd03ba7586 | ||
|
|
2c5a57e386 | ||
|
|
e8858150cb | ||
|
|
333f901187 | ||
|
|
7dd4d6c75e | ||
|
|
99f57cfda6 | ||
|
|
4d1eb94dfd | ||
|
|
22d584f302 | ||
|
|
72c41f104e | ||
|
|
8d3ac3ac1e | ||
|
|
299ae186f1 | ||
|
|
f4df2fb176 | ||
|
|
625fbef613 | ||
|
|
fbed0ac56b | ||
|
|
dc120f3962 | ||
|
|
4f053e5b83 | ||
|
|
c6241581a0 | ||
|
|
041ade66d5 | ||
|
|
067a2949ba | ||
|
|
55c754750e | ||
|
|
72b6c12856 | ||
|
|
15ea0af687 | ||
|
|
ee7e367981 | ||
|
|
8006589828 | ||
|
|
413264eaae | ||
|
|
7db8824da2 | ||
|
|
e1bc010bd1 | ||
|
|
bff02017da | ||
|
|
c0019bd8e5 | ||
|
|
e495ef2c48 | ||
|
|
78d62705cc | ||
|
|
2791bd0015 | ||
|
|
7cf66eb61f | ||
|
|
944c53bff1 | ||
|
|
c756c855ea | ||
|
|
58bb2826b2 | ||
|
|
b7bef87a4d | ||
|
|
0c1b206185 | ||
|
|
7d7e99a92c | ||
|
|
1ba8d7ef74 | ||
|
|
d99bd279e8 | ||
|
|
ee1fe3aa9f | ||
|
|
c4b1d79c5c | ||
|
|
a1a43cdfe0 | ||
|
|
27b62781cc | ||
|
|
0c5d7ff8f2 | ||
|
|
0e2b315ded | ||
|
|
3e74d1c544 | ||
|
|
da690acce5 | ||
|
|
0baa2b484d | ||
|
|
260d7298c3 | ||
|
|
d5cc2ad643 | ||
|
|
12706cd37f | ||
|
|
7167442d6e | ||
|
|
8547101c4b | ||
|
|
5d58a9e4c2 | ||
|
|
cd98a29a4b | ||
|
|
903714fd40 | ||
|
|
138c7acf22 | ||
|
|
03b2b8ae8f | ||
|
|
016b502d46 | ||
|
|
c5f6653564 | ||
|
|
cf9a4e209e | ||
|
|
040421942f | ||
|
|
4dfc596d38 | ||
|
|
fe83ef7635 | ||
|
|
db8b08131f | ||
|
|
32815e628d | ||
|
|
71bdc67a45 | ||
|
|
cb9f50ef63 | ||
|
|
12c754c92b | ||
|
|
e4b3d03da5 | ||
|
|
cc26b66e99 | ||
|
|
34d81fa522 | ||
|
|
49f1a5c2b3 | ||
|
|
326c45fa17 | ||
|
|
a2bb899a6b | ||
|
|
9fedb1674e | ||
|
|
7c91b01125 | ||
|
|
c202e9e106 | ||
|
|
645a8c9349 | ||
|
|
093fdcf3df | ||
|
|
7abda5e8c2 | ||
|
|
abf7c423bb | ||
|
|
55d5c07d00 | ||
|
|
0a9b272fe4 | ||
|
|
b9d6ba2aa0 | ||
|
|
a0c9f7823b | ||
|
|
4477a9c59a | ||
|
|
99a27fe241 | ||
|
|
fefa86e0cf | ||
|
|
098c4910de | ||
|
|
17b7148300 | ||
|
|
f4a2ef28e3 | ||
|
|
f0d013ee76 | ||
|
|
5ece6fec04 | ||
|
|
d88dbf3612 | ||
|
|
2a18efef82 | ||
|
|
fd846fbe77 | ||
|
|
ca7cc4744e | ||
|
|
491fa239bd | ||
|
|
66765dc123 | ||
|
|
70a5348f43 | ||
|
|
2aa61007c6 | ||
|
|
acfbe77ffc | ||
|
|
08696653ca | ||
|
|
8a1a214ca9 | ||
|
|
7aaeb27e0f | ||
|
|
972043c146 | ||
|
|
8475dc082a | ||
|
|
d0e583b29c | ||
|
|
c8feee238b | ||
|
|
6712ecd928 | ||
|
|
d0c7b5d35c | ||
|
|
802add1f97 | ||
|
|
95556811fa | ||
|
|
581472564d | ||
|
|
c7dc8862a5 | ||
|
|
4f8cf019ca | ||
|
|
4c9ac7fcf1 | ||
|
|
1dac05960a | ||
|
|
c27418da77 | ||
|
|
637d076e99 | ||
|
|
391678a5b3 | ||
|
|
4cd0cf1650 | ||
|
|
b813452d33 | ||
|
|
eb85da81e1 | ||
|
|
920cf63201 | ||
|
|
dc09d46bf4 | ||
|
|
05d1b06eeb | ||
|
|
c1661eb06b | ||
|
|
e9626a1d10 | ||
|
|
560bf5ca09 | ||
|
|
512f8d8b60 | ||
|
|
87c8a89349 | ||
|
|
255791f18e | ||
|
|
d5e3416e8e | ||
|
|
5b2d43f665 | ||
|
|
540fc6c2f3 | ||
|
|
b3c5043dcc | ||
|
|
d0d9aae968 | ||
|
|
3270e2bf5a | ||
|
|
013a3e7567 | ||
|
|
8368ba8539 | ||
|
|
ca0310e335 | ||
|
|
4690a678c1 | ||
|
|
f8a39402a2 | ||
|
|
b923e4daea | ||
|
|
247775d1ec | ||
|
|
6e9fea377d | ||
|
|
ca5c65d032 | ||
|
|
f9dc621ebe | ||
|
|
ffe484c31e | ||
|
|
62cd3418ca | ||
|
|
d8a8f3a996 | ||
|
|
0ad8dbbfc9 | ||
|
|
e15a1946c6 | ||
|
|
8878826661 | ||
|
|
95a8b6e5e8 | ||
|
|
388d0d2cfd | ||
|
|
d3a374e71c | ||
|
|
1da2834b1e | ||
|
|
ca3100874f | ||
|
|
117f48a331 | ||
|
|
89bbceefee | ||
|
|
7e18f0e247 | ||
|
|
29c2f24faf | ||
|
|
3bb2dee275 | ||
|
|
88cd5584e8 | ||
|
|
41f9ce2560 | ||
|
|
20044f5749 | ||
|
|
833f0a6aa7 | ||
|
|
10c5ba140c | ||
|
|
316de0b880 | ||
|
|
989966f81b | ||
|
|
ccd550dc52 | ||
|
|
ddf350839a | ||
|
|
6a7dd2787a | ||
|
|
385771e73e | ||
|
|
349ab0b9c5 | ||
|
|
b5e6c6a2f3 | ||
|
|
2832ea641f | ||
|
|
cb7edf2725 | ||
|
|
f1f1be2822 | ||
|
|
7dffd65609 | ||
|
|
2c8a44e28b | ||
|
|
39bb95a6ee | ||
|
|
da9dba80a0 | ||
|
|
12f3285f9b | ||
|
|
7e954e4248 | ||
|
|
d74cc6397b | ||
|
|
777343331e | ||
|
|
a062653743 | ||
|
|
57af0eb64f | ||
|
|
60aae16752 | ||
|
|
e264d95019 | ||
|
|
0664f5a724 | ||
|
|
17c6a19527 | ||
|
|
cbc8b8259b | ||
|
|
1067a2e4be | ||
|
|
74a031a759 | ||
|
|
ee437193fb | ||
|
|
436c53037e | ||
|
|
f55ba9d3cb | ||
|
|
8adb99b768 | ||
|
|
13c42412d2 | ||
|
|
75507d8b35 | ||
|
|
ddfe4932ac | ||
|
|
cf208cc2e3 | ||
|
|
28ac016928 | ||
|
|
f4ae41d006 | ||
|
|
9ec8e5a275 | ||
|
|
a473046058 | ||
|
|
a69b7a5a01 | ||
|
|
640918bcc0 | ||
|
|
f39fbdb3fc | ||
|
|
50d4d81062 | ||
|
|
3b95452481 | ||
|
|
c152ae3c32 | ||
|
|
f6cbaa78e8 | ||
|
|
7adb250b59 | ||
|
|
db5db5aefd | ||
|
|
8fdf84de04 | ||
|
|
ff5cbe80d1 | ||
|
|
e013e0a374 | ||
|
|
b7df312ca7 | ||
|
|
ce82c3c0ae | ||
|
|
2f958cfbda | ||
|
|
8ef41dfd97 | ||
|
|
3082ea4765 | ||
|
|
e482d29951 | ||
|
|
ff48dd7bfb | ||
|
|
7bf9c11822 | ||
|
|
f7937f1e4b | ||
|
|
0115eeabfe | ||
|
|
4b9c3ec0da | ||
|
|
55b81e35a7 | ||
|
|
2a1c7f2d47 | ||
|
|
8603f9838f | ||
|
|
95224f3f11 | ||
|
|
f81acbfe80 | ||
|
|
6d7ff7eba2 | ||
|
|
ad429db7e8 | ||
|
|
4c07abbaf4 | ||
|
|
e3c0551129 | ||
|
|
8971baa42b | ||
|
|
317a1f51f7 | ||
|
|
c63d139482 | ||
|
|
9e682362e9 | ||
|
|
56ec939692 | ||
|
|
a86b942730 | ||
|
|
52eb4c6014 | ||
|
|
f4adbbf90c | ||
|
|
cc86e4a7d2 | ||
|
|
e864447e4a | ||
|
|
73bf552cd6 | ||
|
|
f20a2d2ee9 | ||
|
|
0c25bc063c | ||
|
|
db72781d2a | ||
|
|
0c8ad09040 | ||
|
|
49880ab761 | ||
|
|
fe2d9aa600 | ||
|
|
1dead425e4 | ||
|
|
adb1e47a59 | ||
|
|
ffba8580c1 | ||
|
|
ea18427d29 | ||
|
|
97d42f5c53 | ||
|
|
f3089df086 | ||
|
|
157e7c97ae | ||
|
|
bb8e13e3c9 | ||
|
|
5b4673e8eb | ||
|
|
5b9de8cc07 | ||
|
|
33ea934c8f | ||
|
|
6b3e14b0a4 | ||
|
|
098ceb5567 | ||
|
|
8e2b0632e8 | ||
|
|
420d373d89 | ||
|
|
a59fd7eeb3 | ||
|
|
ee91fa1228 | ||
|
|
a2b5ce0172 | ||
|
|
3efbc71a01 | ||
|
|
b7c5af7e64 | ||
|
|
f939015b97 | ||
|
|
a9ed71f553 | ||
|
|
96a429694f | ||
|
|
fddc5e022e | ||
|
|
2236d53def | ||
|
|
4e018d0a20 | ||
|
|
977b983771 | ||
|
|
fa7a7fe23e | ||
|
|
724a843bbd | ||
|
|
a9ec745275 | ||
|
|
c2ecc15b93 | ||
|
|
83c8650b36 | ||
|
|
89cb809922 | ||
|
|
fdb4eaf437 | ||
|
|
0432f97555 | ||
|
|
8d1631b714 | ||
|
|
dac091552d | ||
|
|
ea027a95a8 | ||
|
|
f73abb05a7 | ||
|
|
d71c49494f | ||
|
|
25665f0841 | ||
|
|
1eec27f890 | ||
|
|
950f86200b | ||
|
|
e19f4931d1 | ||
|
|
0575b1f38d | ||
|
|
f6cd01f7cf | ||
|
|
f2fbc168af | ||
|
|
b50f6f1730 | ||
|
|
f8a7120d9c | ||
|
|
20dbf59420 | ||
|
|
c67a286aa6 | ||
|
|
c96fef6bc8 | ||
|
|
bba02f87ea | ||
|
|
12dc3f5c28 | ||
|
|
0f01a5dcbe | ||
|
|
664dc3bdda | ||
|
|
bdba3cd97d | ||
|
|
d9c0f9315a | ||
|
|
b7f17d435f | ||
|
|
37cdc18639 | ||
|
|
5893a9c49d | ||
|
|
24f58fa16a | ||
|
|
56ffc78fa4 | ||
|
|
061e68bc77 | ||
|
|
177e6312b4 | ||
|
|
1acf4032c2 | ||
|
|
0db752f3a2 | ||
|
|
9c5444698e | ||
|
|
65f3252760 | ||
|
|
e612abe4ba | ||
|
|
ee8b6ebbf6 | ||
|
|
34352e4e0e | ||
|
|
1867b5b317 | ||
|
|
a5b7fca7e0 | ||
|
|
7be2c399b1 | ||
|
|
d6337b3b22 | ||
|
|
d2f8b0ace5 | ||
|
|
d805e8b183 | ||
|
|
1f0f2ec05f | ||
|
|
91ac3b9d7c | ||
|
|
d65bf2eb2f | ||
|
|
1bba9d4307 | ||
|
|
4388338dad | ||
|
|
2fb59c90cf | ||
|
|
68f6ea8def | ||
|
|
3f89295d10 | ||
|
|
748b292e77 | ||
|
|
6451c3d99d | ||
|
|
d14a2de168 | ||
|
|
642150095d | ||
|
|
3bf3ac7922 | ||
|
|
c6d1cebad4 | ||
|
|
08189ce08c | ||
|
|
7013d7d52f | ||
|
|
7045b76f84 | ||
|
|
58a0b4a20d | ||
|
|
0f8eee9809 | ||
|
|
0740299860 | ||
|
|
652215861e | ||
|
|
602209e5a8 | ||
|
|
b60f8b4f70 | ||
|
|
f2b99ccb08 | ||
|
|
b67446d998 | ||
|
|
9670ab0887 | ||
|
|
0223bb85ee | ||
|
|
fd81255db1 | ||
|
|
8a8e1a7f73 | ||
|
|
ef05fbf424 | ||
|
|
fa01b63fa5 | ||
|
|
63d3d25030 | ||
|
|
a8db866228 | ||
|
|
0519eea951 | ||
|
|
5d67252ed0 | ||
|
|
59f4c9985e |
25
.gitignore
vendored
25
.gitignore
vendored
@@ -1,8 +1,31 @@
|
||||
*.pyc
|
||||
*~
|
||||
tags
|
||||
depend
|
||||
ispc
|
||||
ispc_test
|
||||
ispc_ref
|
||||
llvm/
|
||||
objs
|
||||
docs/doxygen
|
||||
docs/ispc.html
|
||||
docs/*.html
|
||||
tests*/*cpp
|
||||
tests*/*run
|
||||
tests*/*.o
|
||||
tests_ispcpp/*.h
|
||||
tests_ispcpp/*pre*
|
||||
logs/
|
||||
notify_log.log
|
||||
alloy_results_*
|
||||
examples/*/*.png
|
||||
examples/*/*.ppm
|
||||
examples/*/objs/*
|
||||
examples/*/ref
|
||||
examples/*/test
|
||||
*.swp
|
||||
check_isa.exe
|
||||
.vscode
|
||||
configure
|
||||
ispc.dSYM
|
||||
|
||||
|
||||
|
||||
47
LICENSE.txt
47
LICENSE.txt
@@ -1,4 +1,4 @@
|
||||
Copyright (c) 2010-2011, Intel Corporation
|
||||
Copyright (c) 2010-2016, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -77,7 +77,7 @@ covered by the following license:
|
||||
University of Illinois/NCSA
|
||||
Open Source License
|
||||
|
||||
Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
|
||||
Copyright (c) 2003-2014 University of Illinois at Urbana-Champaign.
|
||||
All rights reserved.
|
||||
|
||||
Developed by:
|
||||
@@ -141,3 +141,46 @@ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
---------------------------------------------------------------------------
|
||||
|
||||
The ptxtools use parts of the PTX parser code from GPU Ocelot project
|
||||
(https://code.google.com/p/gpuocelot/), which is covered by the following
|
||||
license:
|
||||
|
||||
Copyright 2011
|
||||
GEORGIA TECH RESEARCH CORPORATION
|
||||
ALL RIGHTS RESERVED
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimers.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimers in the
|
||||
documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of GEORGIA TECH RESEARCH CORPORATION nor the
|
||||
names of its contributors may be used to endorse or promote
|
||||
products derived from this software without specific prior
|
||||
written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS''
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH RESEARCH
|
||||
CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
You agree that the Software will not be shipped, transferred, exported,
|
||||
or re-exported directly into any country prohibited by the United States
|
||||
Export Administration Act and the regulations thereunder nor will be
|
||||
used for any purpose prohibited by the Act.
|
||||
|
||||
|
||||
|
||||
299
Makefile
299
Makefile
@@ -1,7 +1,86 @@
|
||||
#
|
||||
# Copyright (c) 2010-2016, Intel Corporation
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
#
|
||||
# * Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
#
|
||||
# * Neither the name of Intel Corporation nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#
|
||||
# ispc Makefile
|
||||
#
|
||||
|
||||
define newline
|
||||
|
||||
|
||||
endef
|
||||
|
||||
define WARNING_BODY
|
||||
============================== !!! WARNING !!! =============================== \n
|
||||
Location of LLVM files in your PATH is different than path in LLVM_HOME \n
|
||||
variable (or LLVM_HOME is not set). The most likely this means that you are \n
|
||||
using default LLVM installation on your system, which is very bad sign. \n
|
||||
Note, that ISPC uses LLVM optimizer and is highly dependent on it. We recommend \n
|
||||
using *patched* version of LLVM 3.8. Patches are availible in \n
|
||||
llvm_patches folder. You can build LLVM manually, or run our scripts, which \n
|
||||
will do all the work for you. Do the following: \n
|
||||
1. Create a folder, where LLVM will reside and set LLVM_HOME variable to its \n
|
||||
path. \n
|
||||
2. Set ISPC_HOME variable to your ISPC location (probably current folder).
|
||||
3. Run alloy.py tool to checkout and build LLVM: \n
|
||||
alloy.py -b --version=3.8 \n
|
||||
4. Add $$LLVM_HOME/bin-3.8/bin path to your PATH. \n
|
||||
==============================================================================
|
||||
endef
|
||||
|
||||
# If you have your own special version of llvm and/or clang, change
|
||||
# these variables to match.
|
||||
LLVM_CONFIG=$(shell which llvm-config)
|
||||
CLANG_INCLUDE=$(shell $(LLVM_CONFIG) --includedir)
|
||||
|
||||
RIGHT_LLVM = $(WARNING_BODY)
|
||||
ifdef LLVM_HOME
|
||||
ifeq ($(findstring $(LLVM_HOME), $(LLVM_CONFIG)), $(LLVM_HOME))
|
||||
RIGHT_LLVM = LLVM from $$LLVM_HOME is used.
|
||||
endif
|
||||
endif
|
||||
|
||||
# Enable ARM by request
|
||||
# To enable: make ARM_ENABLED=1
|
||||
ARM_ENABLED=0
|
||||
|
||||
# Disable NVPTX by request
|
||||
# To enable: make NVPTX_ENABLED=1
|
||||
NVPTX_ENABLED=0
|
||||
|
||||
# Add llvm bin to the path so any scripts run will go to the right llvm-config
|
||||
LLVM_BIN= $(shell $(LLVM_CONFIG) --bindir)
|
||||
export PATH:=$(LLVM_BIN):$(PATH)
|
||||
|
||||
ARCH_OS = $(shell uname)
|
||||
ifeq ($(ARCH_OS), Darwin)
|
||||
ARCH_OS2 = "OSX"
|
||||
@@ -10,29 +89,51 @@ else
|
||||
endif
|
||||
ARCH_TYPE = $(shell arch)
|
||||
|
||||
ifeq ($(shell llvm-config --version), 3.1svn)
|
||||
LLVM_LIBS=-lLLVMAsmParser -lLLVMInstrumentation -lLLVMLinker \
|
||||
-lLLVMArchive -lLLVMBitReader -lLLVMDebugInfo -lLLVMJIT -lLLVMipo \
|
||||
-lLLVMBitWriter -lLLVMTableGen -lLLVMCBackendInfo \
|
||||
-lLLVMX86Disassembler -lLLVMX86CodeGen -lLLVMSelectionDAG \
|
||||
-lLLVMAsmPrinter -lLLVMX86AsmParser -lLLVMX86Desc -lLLVMX86Info \
|
||||
-lLLVMX86AsmPrinter -lLLVMX86Utils -lLLVMMCDisassembler -lLLVMMCParser \
|
||||
-lLLVMCodeGen -lLLVMScalarOpts -lLLVMInstCombine -lLLVMTransformUtils \
|
||||
-lLLVMipa -lLLVMAnalysis -lLLVMMCJIT -lLLVMRuntimeDyld \
|
||||
-lLLVMExecutionEngine -lLLVMTarget -lLLVMMC -lLLVMObject -lLLVMCore \
|
||||
-lLLVMSupport
|
||||
else
|
||||
LLVM_LIBS=$(shell llvm-config --libs)
|
||||
DNDEBUG_FLAG=$(shell $(LLVM_CONFIG) --cxxflags | grep -o "\-DNDEBUG")
|
||||
LLVM_CXXFLAGS=$(shell $(LLVM_CONFIG) --cppflags) $(DNDEBUG_FLAG)
|
||||
LLVM_VERSION=LLVM_$(shell $(LLVM_CONFIG) --version | sed -e 's/svn//' -e 's/\./_/' -e 's/\..*//')
|
||||
LLVM_VERSION_DEF=-D$(LLVM_VERSION)
|
||||
|
||||
LLVM_COMPONENTS = engine ipo bitreader bitwriter instrumentation linker
|
||||
# Component "option" was introduced in 3.3 and starting with 3.4 it is required for the link step.
|
||||
# We check if it's available before adding it (to not break 3.2 and earlier).
|
||||
ifeq ($(shell $(LLVM_CONFIG) --components |grep -c option), 1)
|
||||
LLVM_COMPONENTS+=option
|
||||
endif
|
||||
ifneq ($(ARM_ENABLED), 0)
|
||||
LLVM_COMPONENTS+=arm
|
||||
endif
|
||||
ifneq ($(NVPTX_ENABLED), 0)
|
||||
LLVM_COMPONENTS+=nvptx
|
||||
endif
|
||||
LLVM_LIBS=$(shell $(LLVM_CONFIG) --libs $(LLVM_COMPONENTS))
|
||||
|
||||
CLANG=clang
|
||||
CLANG_LIBS = -lclangFrontend -lclangDriver \
|
||||
-lclangSerialization -lclangParse -lclangSema \
|
||||
-lclangAnalysis -lclangAST -lclangLex -lclangBasic
|
||||
-lclangAnalysis -lclangAST -lclangBasic \
|
||||
-lclangEdit -lclangLex
|
||||
|
||||
ISPC_LIBS=$(shell llvm-config --ldflags) $(CLANG_LIBS) $(LLVM_LIBS) \
|
||||
ISPC_LIBS=$(shell $(LLVM_CONFIG) --ldflags) $(CLANG_LIBS) $(LLVM_LIBS) \
|
||||
-lpthread
|
||||
|
||||
ifeq ($(LLVM_VERSION),LLVM_3_4)
|
||||
ISPC_LIBS += -lcurses
|
||||
endif
|
||||
|
||||
# There is no logical OR in GNU make.
|
||||
# This 'ifneq' acts like if( !($(LLVM_VERSION) == LLVM_3_2 || $(LLVM_VERSION) == LLVM_3_3 || $(LLVM_VERSION) == LLVM_3_4))
|
||||
ifeq (,$(filter $(LLVM_VERSION), LLVM_3_2 LLVM_3_3 LLVM_3_4))
|
||||
ISPC_LIBS += -lcurses -lz
|
||||
# This is here because llvm-config fails to report dependency on tinfo library in some case.
|
||||
# This is described in LLVM bug 16902.
|
||||
ifeq ($(ARCH_OS),Linux)
|
||||
ifneq ($(shell ldconfig -p |grep -c tinfo), 0)
|
||||
ISPC_LIBS += -ltinfo
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(ARCH_OS),Linux)
|
||||
ISPC_LIBS += -ldl
|
||||
endif
|
||||
@@ -41,28 +142,52 @@ ifeq ($(ARCH_OS2),Msys)
|
||||
ISPC_LIBS += -lshlwapi -limagehlp -lpsapi
|
||||
endif
|
||||
|
||||
LLVM_CXXFLAGS=$(shell llvm-config --cppflags)
|
||||
LLVM_VERSION=LLVM_$(shell llvm-config --version | sed s/\\./_/)
|
||||
LLVM_VERSION_DEF=-D$(LLVM_VERSION)
|
||||
|
||||
# Define build time stamp and revision.
|
||||
# For revision we use GIT or SVN info.
|
||||
BUILD_DATE=$(shell date +%Y%m%d)
|
||||
BUILD_VERSION=$(shell git log --abbrev-commit --abbrev=16 | head -1)
|
||||
GIT_REVISION:=$(shell git log --abbrev-commit --abbrev=16 2>/dev/null | head -1)
|
||||
ifeq (${GIT_REVISION},)
|
||||
SVN_REVISION:=$(shell svn log -l 1 2>/dev/null | grep -o \^r[[:digit:]]\* )
|
||||
ifeq (${SVN_REVISION},)
|
||||
# Failed to get revision info
|
||||
BUILD_VERSION:="no_version_info"
|
||||
else
|
||||
# SVN revision info
|
||||
BUILD_VERSION:=$(SVN_REVISION)
|
||||
endif
|
||||
else
|
||||
# GIT revision info
|
||||
BUILD_VERSION:=$(GIT_REVISION)
|
||||
endif
|
||||
|
||||
CXX=g++
|
||||
CPP=cpp
|
||||
OPT=-g3
|
||||
CXXFLAGS=$(OPT) $(LLVM_CXXFLAGS) -I. -Iobjs/ -Wall $(LLVM_VERSION_DEF) \
|
||||
-DBUILD_DATE="\"$(BUILD_DATE)\"" -DBUILD_VERSION="\"$(BUILD_VERSION)\""
|
||||
CXX=clang++
|
||||
OPT=-O2
|
||||
CXXFLAGS=$(OPT) $(LLVM_CXXFLAGS) -I. -Iobjs/ -I$(CLANG_INCLUDE) \
|
||||
$(LLVM_VERSION_DEF) \
|
||||
-Wall \
|
||||
-DBUILD_DATE="\"$(BUILD_DATE)\"" -DBUILD_VERSION="\"$(BUILD_VERSION)\"" \
|
||||
-Wno-sign-compare -Wno-unused-function -Werror
|
||||
|
||||
# if( !($(LLVM_VERSION) == LLVM_3_2 || $(LLVM_VERSION) == LLVM_3_3 || $(LLVM_VERSION) == LLVM_3_4))
|
||||
ifeq (,$(filter $(LLVM_VERSION), LLVM_3_2 LLVM_3_3 LLVM_3_4))
|
||||
CXXFLAGS+=-std=c++11 -Wno-c99-extensions -Wno-deprecated-register -fno-rtti
|
||||
endif
|
||||
ifneq ($(ARM_ENABLED), 0)
|
||||
CXXFLAGS+=-DISPC_ARM_ENABLED
|
||||
endif
|
||||
ifneq ($(NVPTX_ENABLED), 0)
|
||||
CXXFLAGS+=-DISPC_NVPTX_ENABLED
|
||||
endif
|
||||
|
||||
LDFLAGS=
|
||||
ifeq ($(ARCH_OS),Linux)
|
||||
# try to link everything statically under Linux (including libstdc++) so
|
||||
# that the binaries we generate will be portable across distributions...
|
||||
ifeq ($(ARCH_TYPE),x86_64)
|
||||
LDFLAGS=-static -L/usr/lib/gcc/x86_64-linux-gnu/4.4
|
||||
else
|
||||
LDFLAGS=-L/usr/lib/gcc/i686-redhat-linux/4.6.0
|
||||
endif
|
||||
# LDFLAGS=-static
|
||||
# Linking everything statically isn't easy (too many things are required),
|
||||
# but linking libstdc++ and libgcc is necessary when building with relatively
|
||||
# new gcc, when going to distribute to old systems.
|
||||
# LDFLAGS=-static-libgcc -static-libstdc++
|
||||
endif
|
||||
|
||||
LEX=flex
|
||||
@@ -75,26 +200,39 @@ CXX_SRC=ast.cpp builtins.cpp cbackend.cpp ctx.cpp decl.cpp expr.cpp func.cpp \
|
||||
type.cpp util.cpp
|
||||
HEADERS=ast.h builtins.h ctx.h decl.h expr.h func.h ispc.h llvmutil.h module.h \
|
||||
opt.h stmt.h sym.h type.h util.h
|
||||
TARGETS=avx avx-x2 sse2 sse2-x2 sse4 sse4-x2 generic-4 generic-8 generic-16
|
||||
BUILTINS_SRC=$(addprefix builtins/target-, $(addsuffix .ll, $(TARGETS))) \
|
||||
builtins/dispatch.ll
|
||||
BUILTINS_OBJS=$(addprefix builtins-, $(notdir $(BUILTINS_SRC:.ll=.o))) \
|
||||
builtins-c-32.cpp builtins-c-64.cpp
|
||||
TARGETS=avx2-i64x4 avx11-i64x4 avx1-i64x4 avx1 avx1-x2 avx11 avx11-x2 avx2 avx2-x2 \
|
||||
sse2 sse2-x2 sse4-8 sse4-16 sse4 sse4-x2 \
|
||||
generic-4 generic-8 generic-16 generic-32 generic-64 generic-1 knl skx
|
||||
ifneq ($(ARM_ENABLED), 0)
|
||||
TARGETS+=neon-32 neon-16 neon-8
|
||||
endif
|
||||
ifneq ($(NVPTX_ENABLED), 0)
|
||||
TARGETS+=nvptx
|
||||
endif
|
||||
# These files need to be compiled in two versions - 32 and 64 bits.
|
||||
BUILTINS_SRC_TARGET=$(addprefix builtins/target-, $(addsuffix .ll, $(TARGETS)))
|
||||
# These are files to be compiled in single version.
|
||||
BUILTINS_SRC_COMMON=builtins/dispatch.ll
|
||||
BUILTINS_OBJS_32=$(addprefix builtins-, $(notdir $(BUILTINS_SRC_TARGET:.ll=-32bit.o)))
|
||||
BUILTINS_OBJS_64=$(addprefix builtins-, $(notdir $(BUILTINS_SRC_TARGET:.ll=-64bit.o)))
|
||||
BUILTINS_OBJS=$(addprefix builtins-, $(notdir $(BUILTINS_SRC_COMMON:.ll=.o))) \
|
||||
$(BUILTINS_OBJS_32) $(BUILTINS_OBJS_64) \
|
||||
builtins-c-32.cpp builtins-c-64.cpp
|
||||
BISON_SRC=parse.yy
|
||||
FLEX_SRC=lex.ll
|
||||
|
||||
OBJS=$(addprefix objs/, $(CXX_SRC:.cpp=.o) $(BUILTINS_OBJS) \
|
||||
stdlib_generic_ispc.o stdlib_x86_ispc.o \
|
||||
stdlib_mask1_ispc.o stdlib_mask8_ispc.o stdlib_mask16_ispc.o stdlib_mask32_ispc.o stdlib_mask64_ispc.o \
|
||||
$(BISON_SRC:.yy=.o) $(FLEX_SRC:.ll=.o))
|
||||
|
||||
default: ispc
|
||||
|
||||
.PHONY: dirs clean depend doxygen print_llvm_src
|
||||
.PHONY: dirs clean depend doxygen print_llvm_src llvm_check
|
||||
.PRECIOUS: objs/builtins-%.cpp
|
||||
|
||||
depend: $(CXX_SRC) $(HEADERS)
|
||||
depend: llvm_check $(CXX_SRC) $(HEADERS)
|
||||
@echo Updating dependencies
|
||||
@gcc -MM $(CXXFLAGS) $(CXX_SRC) | sed 's_^\([a-z]\)_objs/\1_g' > depend
|
||||
@$(CXX) -MM $(CXXFLAGS) $(CXX_SRC) | sed 's_^\([a-z]\)_objs/\1_g' > depend
|
||||
|
||||
-include depend
|
||||
|
||||
@@ -102,8 +240,18 @@ dirs:
|
||||
@echo Creating objs/ directory
|
||||
@/bin/mkdir -p objs
|
||||
|
||||
print_llvm_src:
|
||||
llvm_check:
|
||||
@llvm-config --version > /dev/null || \
|
||||
(echo; \
|
||||
echo "******************************************"; \
|
||||
echo "ERROR: llvm-config not found in your PATH"; \
|
||||
echo "******************************************"; \
|
||||
echo; exit 1)
|
||||
@echo -e '$(subst $(newline), ,$(RIGHT_LLVM))'
|
||||
|
||||
print_llvm_src: llvm_check
|
||||
@echo Using LLVM `llvm-config --version` from `llvm-config --libdir`
|
||||
@echo Using compiler to build: `$(CXX) --version | head -1`
|
||||
|
||||
clean:
|
||||
/bin/rm -rf objs ispc
|
||||
@@ -114,7 +262,25 @@ doxygen:
|
||||
|
||||
ispc: print_llvm_src dirs $(OBJS)
|
||||
@echo Creating ispc executable
|
||||
@$(CXX) $(LDFLAGS) -o $@ $(OBJS) $(ISPC_LIBS)
|
||||
@$(CXX) $(OPT) $(LDFLAGS) -o $@ $(OBJS) $(ISPC_LIBS)
|
||||
|
||||
# Use clang as a default compiler, instead of gcc
|
||||
# This is default now.
|
||||
clang: ispc
|
||||
clang: CXX=clang++
|
||||
|
||||
# Use gcc as a default compiler
|
||||
gcc: ispc
|
||||
gcc: CXX=g++
|
||||
|
||||
# Build ispc with address sanitizer instrumentation using clang compiler
|
||||
# Note that this is not portable build
|
||||
asan: clang
|
||||
asan: OPT+=-fsanitize=address
|
||||
|
||||
# Do debug build, i.e. -O0 -g
|
||||
debug: ispc
|
||||
debug: OPT=-O0 -g
|
||||
|
||||
objs/%.o: %.cpp
|
||||
@echo Compiling $<
|
||||
@@ -124,6 +290,10 @@ objs/cbackend.o: cbackend.cpp
|
||||
@echo Compiling $<
|
||||
@$(CXX) -fno-rtti -fno-exceptions $(CXXFLAGS) -o $@ -c $<
|
||||
|
||||
objs/opt.o: opt.cpp
|
||||
@echo Compiling $<
|
||||
@$(CXX) -fno-rtti $(CXXFLAGS) -o $@ -c $<
|
||||
|
||||
objs/%.o: objs/%.cpp
|
||||
@echo Compiling $<
|
||||
@$(CXX) $(CXXFLAGS) -o $@ -c $<
|
||||
@@ -144,24 +314,47 @@ objs/lex.o: objs/lex.cpp $(HEADERS) objs/parse.cc
|
||||
@echo Compiling $<
|
||||
@$(CXX) $(CXXFLAGS) -o $@ -c $<
|
||||
|
||||
objs/builtins-%.cpp: builtins/%.ll builtins/util.m4 $(wildcard builtins/*common.ll)
|
||||
objs/builtins-dispatch.cpp: builtins/dispatch.ll builtins/util.m4 builtins/util-nvptx.m4 builtins/svml.m4 $(wildcard builtins/*common.ll)
|
||||
@echo Creating C++ source from builtins definition file $<
|
||||
@m4 -Ibuiltins/ -DLLVM_VERSION=$(LLVM_VERSION) $< | python bitcode2cpp.py $< > $@
|
||||
@m4 -Ibuiltins/ -DLLVM_VERSION=$(LLVM_VERSION) -DBUILD_OS=UNIX $< | python bitcode2cpp.py $< > $@
|
||||
|
||||
objs/builtins-%-32bit.cpp: builtins/%.ll builtins/util.m4 builtins/util-nvptx.m4 builtins/svml.m4 $(wildcard builtins/*common.ll)
|
||||
@echo Creating C++ source from builtins definition file $< \(32 bit version\)
|
||||
@m4 -Ibuiltins/ -DLLVM_VERSION=$(LLVM_VERSION) -DBUILD_OS=UNIX -DRUNTIME=32 $< | python bitcode2cpp.py $< 32bit > $@
|
||||
|
||||
objs/builtins-%-64bit.cpp: builtins/%.ll builtins/util.m4 builtins/util-nvptx.m4 builtins/svml.m4 $(wildcard builtins/*common.ll)
|
||||
@echo Creating C++ source from builtins definition file $< \(64 bit version\)
|
||||
@m4 -Ibuiltins/ -DLLVM_VERSION=$(LLVM_VERSION) -DBUILD_OS=UNIX -DRUNTIME=64 $< | python bitcode2cpp.py $< 64bit > $@
|
||||
|
||||
objs/builtins-c-32.cpp: builtins/builtins.c
|
||||
@echo Creating C++ source from builtins definition file $<
|
||||
@$(CLANG) -m32 -emit-llvm -c $< -o - | llvm-dis - | python bitcode2cpp.py c-32 > $@
|
||||
@$(CLANG) -m32 -emit-llvm -c $< -o - | llvm-dis - | python bitcode2cpp.py c 32 > $@
|
||||
|
||||
objs/builtins-c-64.cpp: builtins/builtins.c
|
||||
@echo Creating C++ source from builtins definition file $<
|
||||
@$(CLANG) -m64 -emit-llvm -c $< -o - | llvm-dis - | python bitcode2cpp.py c-64 > $@
|
||||
@$(CLANG) -m64 -emit-llvm -c $< -o - | llvm-dis - | python bitcode2cpp.py c 64 > $@
|
||||
|
||||
objs/stdlib_generic_ispc.cpp: stdlib.ispc
|
||||
@echo Creating C++ source from $< for generic
|
||||
@$(CLANG) -E -x c -DISPC_TARGET_GENERIC=1 -DISPC=1 -DPI=3.1415926536 $< -o - | \
|
||||
python stdlib2cpp.py generic > $@
|
||||
objs/stdlib_mask1_ispc.cpp: stdlib.ispc
|
||||
@echo Creating C++ source from $< for mask1
|
||||
@$(CLANG) -E -x c -DISPC_MASK_BITS=1 -DISPC=1 -DPI=3.14159265358979 $< -o - | \
|
||||
python stdlib2cpp.py mask1 > $@
|
||||
|
||||
objs/stdlib_x86_ispc.cpp: stdlib.ispc
|
||||
@echo Creating C++ source from $< for x86
|
||||
@$(CLANG) -E -x c -DISPC=1 -DPI=3.1415926536 $< -o - | \
|
||||
python stdlib2cpp.py x86 > $@
|
||||
objs/stdlib_mask8_ispc.cpp: stdlib.ispc
|
||||
@echo Creating C++ source from $< for mask8
|
||||
@$(CLANG) -E -x c -DISPC_MASK_BITS=8 -DISPC=1 -DPI=3.14159265358979 $< -o - | \
|
||||
python stdlib2cpp.py mask8 > $@
|
||||
|
||||
objs/stdlib_mask16_ispc.cpp: stdlib.ispc
|
||||
@echo Creating C++ source from $< for mask16
|
||||
@$(CLANG) -E -x c -DISPC_MASK_BITS=16 -DISPC=1 -DPI=3.14159265358979 $< -o - | \
|
||||
python stdlib2cpp.py mask16 > $@
|
||||
|
||||
objs/stdlib_mask32_ispc.cpp: stdlib.ispc
|
||||
@echo Creating C++ source from $< for mask32
|
||||
@$(CLANG) -E -x c -DISPC_MASK_BITS=32 -DISPC=1 -DPI=3.14159265358979 $< -o - | \
|
||||
python stdlib2cpp.py mask32 > $@
|
||||
|
||||
objs/stdlib_mask64_ispc.cpp: stdlib.ispc
|
||||
@echo Creating C++ source from $< for mask64
|
||||
@$(CLANG) -E -x c -DISPC_MASK_BITS=64 -DISPC=1 -DPI=3.14159265358979 $< -o - | \
|
||||
python stdlib2cpp.py mask64 > $@
|
||||
|
||||
@@ -47,7 +47,7 @@ remarkable `LLVM Compiler Infrastructure <http://llvm.org>`_ for back-end
|
||||
code generation and optimization and is `hosted on
|
||||
github <http://github.com/ispc/ispc/>`_. It supports Windows, Mac, and
|
||||
Linux, with both x86 and x86-64 targets. It currently supports the SSE2,
|
||||
SSE4, and AVX instruction sets.
|
||||
SSE4, AVX1, and AVX2 instruction sets.
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
341
ast.cpp
341
ast.cpp
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2011, Intel Corporation
|
||||
Copyright (c) 2011-2015, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -28,12 +28,14 @@
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/** @file ast.cpp
|
||||
@brief
|
||||
*/
|
||||
|
||||
@brief General functionality related to abstract syntax trees and
|
||||
traversal of them.
|
||||
*/
|
||||
|
||||
#include "ast.h"
|
||||
#include "expr.h"
|
||||
@@ -53,10 +55,17 @@ ASTNode::~ASTNode() {
|
||||
// AST
|
||||
|
||||
void
|
||||
AST::AddFunction(Symbol *sym, const std::vector<Symbol *> &args, Stmt *code) {
|
||||
AST::AddFunction(Symbol *sym, Stmt *code) {
|
||||
if (sym == NULL)
|
||||
return;
|
||||
functions.push_back(new Function(sym, args, code));
|
||||
|
||||
Function *f = new Function(sym, code);
|
||||
|
||||
if (f->IsPolyFunction()) {
|
||||
FATAL("This is a good start, but implement me!");
|
||||
} else {
|
||||
functions.push_back(f);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -83,79 +92,105 @@ WalkAST(ASTNode *node, ASTPreCallBackFunc preFunc, ASTPostCallBackFunc postFunc,
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Handle Statements
|
||||
if (dynamic_cast<Stmt *>(node) != NULL) {
|
||||
if (llvm::dyn_cast<Stmt>(node) != NULL) {
|
||||
ExprStmt *es;
|
||||
DeclStmt *ds;
|
||||
IfStmt *is;
|
||||
DoStmt *dos;
|
||||
ForStmt *fs;
|
||||
ForeachStmt *fes;
|
||||
ForeachActiveStmt *fas;
|
||||
ForeachUniqueStmt *fus;
|
||||
CaseStmt *cs;
|
||||
DefaultStmt *defs;
|
||||
SwitchStmt *ss;
|
||||
ReturnStmt *rs;
|
||||
LabeledStmt *ls;
|
||||
StmtList *sl;
|
||||
PrintStmt *ps;
|
||||
AssertStmt *as;
|
||||
DeleteStmt *dels;
|
||||
UnmaskedStmt *ums;
|
||||
|
||||
if ((es = dynamic_cast<ExprStmt *>(node)) != NULL)
|
||||
if ((es = llvm::dyn_cast<ExprStmt>(node)) != NULL)
|
||||
es->expr = (Expr *)WalkAST(es->expr, preFunc, postFunc, data);
|
||||
else if ((ds = dynamic_cast<DeclStmt *>(node)) != NULL) {
|
||||
else if ((ds = llvm::dyn_cast<DeclStmt>(node)) != NULL) {
|
||||
for (unsigned int i = 0; i < ds->vars.size(); ++i)
|
||||
ds->vars[i].init = (Expr *)WalkAST(ds->vars[i].init, preFunc,
|
||||
ds->vars[i].init = (Expr *)WalkAST(ds->vars[i].init, preFunc,
|
||||
postFunc, data);
|
||||
}
|
||||
else if ((is = dynamic_cast<IfStmt *>(node)) != NULL) {
|
||||
else if ((is = llvm::dyn_cast<IfStmt>(node)) != NULL) {
|
||||
is->test = (Expr *)WalkAST(is->test, preFunc, postFunc, data);
|
||||
is->trueStmts = (Stmt *)WalkAST(is->trueStmts, preFunc,
|
||||
is->trueStmts = (Stmt *)WalkAST(is->trueStmts, preFunc,
|
||||
postFunc, data);
|
||||
is->falseStmts = (Stmt *)WalkAST(is->falseStmts, preFunc,
|
||||
is->falseStmts = (Stmt *)WalkAST(is->falseStmts, preFunc,
|
||||
postFunc, data);
|
||||
}
|
||||
else if ((dos = dynamic_cast<DoStmt *>(node)) != NULL) {
|
||||
dos->testExpr = (Expr *)WalkAST(dos->testExpr, preFunc,
|
||||
else if ((dos = llvm::dyn_cast<DoStmt>(node)) != NULL) {
|
||||
dos->testExpr = (Expr *)WalkAST(dos->testExpr, preFunc,
|
||||
postFunc, data);
|
||||
dos->bodyStmts = (Stmt *)WalkAST(dos->bodyStmts, preFunc,
|
||||
dos->bodyStmts = (Stmt *)WalkAST(dos->bodyStmts, preFunc,
|
||||
postFunc, data);
|
||||
}
|
||||
else if ((fs = dynamic_cast<ForStmt *>(node)) != NULL) {
|
||||
else if ((fs = llvm::dyn_cast<ForStmt>(node)) != NULL) {
|
||||
fs->init = (Stmt *)WalkAST(fs->init, preFunc, postFunc, data);
|
||||
fs->test = (Expr *)WalkAST(fs->test, preFunc, postFunc, data);
|
||||
fs->step = (Stmt *)WalkAST(fs->step, preFunc, postFunc, data);
|
||||
fs->stmts = (Stmt *)WalkAST(fs->stmts, preFunc, postFunc, data);
|
||||
}
|
||||
else if ((fes = dynamic_cast<ForeachStmt *>(node)) != NULL) {
|
||||
else if ((fes = llvm::dyn_cast<ForeachStmt>(node)) != NULL) {
|
||||
for (unsigned int i = 0; i < fes->startExprs.size(); ++i)
|
||||
fes->startExprs[i] = (Expr *)WalkAST(fes->startExprs[i], preFunc,
|
||||
fes->startExprs[i] = (Expr *)WalkAST(fes->startExprs[i], preFunc,
|
||||
postFunc, data);
|
||||
for (unsigned int i = 0; i < fes->endExprs.size(); ++i)
|
||||
fes->endExprs[i] = (Expr *)WalkAST(fes->endExprs[i], preFunc,
|
||||
fes->endExprs[i] = (Expr *)WalkAST(fes->endExprs[i], preFunc,
|
||||
postFunc, data);
|
||||
fes->stmts = (Stmt *)WalkAST(fes->stmts, preFunc, postFunc, data);
|
||||
}
|
||||
else if (dynamic_cast<BreakStmt *>(node) != NULL ||
|
||||
dynamic_cast<ContinueStmt *>(node) != NULL ||
|
||||
dynamic_cast<GotoStmt *>(node) != NULL) {
|
||||
else if ((fas = llvm::dyn_cast<ForeachActiveStmt>(node)) != NULL) {
|
||||
fas->stmts = (Stmt *)WalkAST(fas->stmts, preFunc, postFunc, data);
|
||||
}
|
||||
else if ((fus = llvm::dyn_cast<ForeachUniqueStmt>(node)) != NULL) {
|
||||
fus->expr = (Expr *)WalkAST(fus->expr, preFunc, postFunc, data);
|
||||
fus->stmts = (Stmt *)WalkAST(fus->stmts, preFunc, postFunc, data);
|
||||
}
|
||||
else if ((cs = llvm::dyn_cast<CaseStmt>(node)) != NULL)
|
||||
cs->stmts = (Stmt *)WalkAST(cs->stmts, preFunc, postFunc, data);
|
||||
else if ((defs = llvm::dyn_cast<DefaultStmt>(node)) != NULL)
|
||||
defs->stmts = (Stmt *)WalkAST(defs->stmts, preFunc, postFunc, data);
|
||||
else if ((ss = llvm::dyn_cast<SwitchStmt>(node)) != NULL) {
|
||||
ss->expr = (Expr *)WalkAST(ss->expr, preFunc, postFunc, data);
|
||||
ss->stmts = (Stmt *)WalkAST(ss->stmts, preFunc, postFunc, data);
|
||||
}
|
||||
else if (llvm::dyn_cast<BreakStmt>(node) != NULL ||
|
||||
llvm::dyn_cast<ContinueStmt>(node) != NULL ||
|
||||
llvm::dyn_cast<GotoStmt>(node) != NULL) {
|
||||
// nothing
|
||||
}
|
||||
else if ((ls = dynamic_cast<LabeledStmt *>(node)) != NULL)
|
||||
else if ((ls = llvm::dyn_cast<LabeledStmt>(node)) != NULL)
|
||||
ls->stmt = (Stmt *)WalkAST(ls->stmt, preFunc, postFunc, data);
|
||||
else if ((rs = dynamic_cast<ReturnStmt *>(node)) != NULL)
|
||||
rs->val = (Expr *)WalkAST(rs->val, preFunc, postFunc, data);
|
||||
else if ((sl = dynamic_cast<StmtList *>(node)) != NULL) {
|
||||
else if ((rs = llvm::dyn_cast<ReturnStmt>(node)) != NULL)
|
||||
rs->expr = (Expr *)WalkAST(rs->expr, preFunc, postFunc, data);
|
||||
else if ((sl = llvm::dyn_cast<StmtList>(node)) != NULL) {
|
||||
std::vector<Stmt *> &sls = sl->stmts;
|
||||
for (unsigned int i = 0; i < sls.size(); ++i)
|
||||
sls[i] = (Stmt *)WalkAST(sls[i], preFunc, postFunc, data);
|
||||
}
|
||||
else if ((ps = dynamic_cast<PrintStmt *>(node)) != NULL)
|
||||
else if ((ps = llvm::dyn_cast<PrintStmt>(node)) != NULL)
|
||||
ps->values = (Expr *)WalkAST(ps->values, preFunc, postFunc, data);
|
||||
else if ((as = dynamic_cast<AssertStmt *>(node)) != NULL)
|
||||
else if ((as = llvm::dyn_cast<AssertStmt>(node)) != NULL)
|
||||
as->expr = (Expr *)WalkAST(as->expr, preFunc, postFunc, data);
|
||||
else if ((dels = llvm::dyn_cast<DeleteStmt>(node)) != NULL)
|
||||
dels->expr = (Expr *)WalkAST(dels->expr, preFunc, postFunc, data);
|
||||
else if ((ums = llvm::dyn_cast<UnmaskedStmt>(node)) != NULL)
|
||||
ums->stmts = (Stmt *)WalkAST(ums->stmts, preFunc, postFunc, data);
|
||||
else
|
||||
FATAL("Unhandled statement type in WalkAST()");
|
||||
}
|
||||
else {
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Handle expressions
|
||||
Assert(dynamic_cast<Expr *>(node) != NULL);
|
||||
Assert(llvm::dyn_cast<Expr>(node) != NULL);
|
||||
UnaryExpr *ue;
|
||||
BinaryExpr *be;
|
||||
AssignExpr *ae;
|
||||
@@ -166,60 +201,73 @@ WalkAST(ASTNode *node, ASTPreCallBackFunc preFunc, ASTPostCallBackFunc postFunc,
|
||||
MemberExpr *me;
|
||||
TypeCastExpr *tce;
|
||||
ReferenceExpr *re;
|
||||
DereferenceExpr *dre;
|
||||
PtrDerefExpr *ptrderef;
|
||||
RefDerefExpr *refderef;
|
||||
SizeOfExpr *soe;
|
||||
AddressOfExpr *aoe;
|
||||
NewExpr *newe;
|
||||
|
||||
if ((ue = dynamic_cast<UnaryExpr *>(node)) != NULL)
|
||||
if ((ue = llvm::dyn_cast<UnaryExpr>(node)) != NULL)
|
||||
ue->expr = (Expr *)WalkAST(ue->expr, preFunc, postFunc, data);
|
||||
else if ((be = dynamic_cast<BinaryExpr *>(node)) != NULL) {
|
||||
else if ((be = llvm::dyn_cast<BinaryExpr>(node)) != NULL) {
|
||||
be->arg0 = (Expr *)WalkAST(be->arg0, preFunc, postFunc, data);
|
||||
be->arg1 = (Expr *)WalkAST(be->arg1, preFunc, postFunc, data);
|
||||
}
|
||||
else if ((ae = dynamic_cast<AssignExpr *>(node)) != NULL) {
|
||||
else if ((ae = llvm::dyn_cast<AssignExpr>(node)) != NULL) {
|
||||
ae->lvalue = (Expr *)WalkAST(ae->lvalue, preFunc, postFunc, data);
|
||||
ae->rvalue = (Expr *)WalkAST(ae->rvalue, preFunc, postFunc, data);
|
||||
}
|
||||
else if ((se = dynamic_cast<SelectExpr *>(node)) != NULL) {
|
||||
else if ((se = llvm::dyn_cast<SelectExpr>(node)) != NULL) {
|
||||
se->test = (Expr *)WalkAST(se->test, preFunc, postFunc, data);
|
||||
se->expr1 = (Expr *)WalkAST(se->expr1, preFunc, postFunc, data);
|
||||
se->expr2 = (Expr *)WalkAST(se->expr2, preFunc, postFunc, data);
|
||||
}
|
||||
else if ((el = dynamic_cast<ExprList *>(node)) != NULL) {
|
||||
else if ((el = llvm::dyn_cast<ExprList>(node)) != NULL) {
|
||||
for (unsigned int i = 0; i < el->exprs.size(); ++i)
|
||||
el->exprs[i] = (Expr *)WalkAST(el->exprs[i], preFunc,
|
||||
el->exprs[i] = (Expr *)WalkAST(el->exprs[i], preFunc,
|
||||
postFunc, data);
|
||||
}
|
||||
else if ((fce = dynamic_cast<FunctionCallExpr *>(node)) != NULL) {
|
||||
else if ((fce = llvm::dyn_cast<FunctionCallExpr>(node)) != NULL) {
|
||||
fce->func = (Expr *)WalkAST(fce->func, preFunc, postFunc, data);
|
||||
fce->args = (ExprList *)WalkAST(fce->args, preFunc, postFunc, data);
|
||||
fce->launchCountExpr = (Expr *)WalkAST(fce->launchCountExpr, preFunc,
|
||||
for (int k = 0; k < 3; k++)
|
||||
fce->launchCountExpr[0] = (Expr *)WalkAST(fce->launchCountExpr[0], preFunc,
|
||||
postFunc, data);
|
||||
}
|
||||
else if ((ie = dynamic_cast<IndexExpr *>(node)) != NULL) {
|
||||
else if ((ie = llvm::dyn_cast<IndexExpr>(node)) != NULL) {
|
||||
ie->baseExpr = (Expr *)WalkAST(ie->baseExpr, preFunc, postFunc, data);
|
||||
ie->index = (Expr *)WalkAST(ie->index, preFunc, postFunc, data);
|
||||
}
|
||||
else if ((me = dynamic_cast<MemberExpr *>(node)) != NULL)
|
||||
else if ((me = llvm::dyn_cast<MemberExpr>(node)) != NULL)
|
||||
me->expr = (Expr *)WalkAST(me->expr, preFunc, postFunc, data);
|
||||
else if ((tce = dynamic_cast<TypeCastExpr *>(node)) != NULL)
|
||||
else if ((tce = llvm::dyn_cast<TypeCastExpr>(node)) != NULL)
|
||||
tce->expr = (Expr *)WalkAST(tce->expr, preFunc, postFunc, data);
|
||||
else if ((re = dynamic_cast<ReferenceExpr *>(node)) != NULL)
|
||||
else if ((re = llvm::dyn_cast<ReferenceExpr>(node)) != NULL)
|
||||
re->expr = (Expr *)WalkAST(re->expr, preFunc, postFunc, data);
|
||||
else if ((dre = dynamic_cast<DereferenceExpr *>(node)) != NULL)
|
||||
dre->expr = (Expr *)WalkAST(dre->expr, preFunc, postFunc, data);
|
||||
else if ((soe = dynamic_cast<SizeOfExpr *>(node)) != NULL)
|
||||
else if ((ptrderef = llvm::dyn_cast<PtrDerefExpr>(node)) != NULL)
|
||||
ptrderef->expr = (Expr *)WalkAST(ptrderef->expr, preFunc, postFunc,
|
||||
data);
|
||||
else if ((refderef = llvm::dyn_cast<RefDerefExpr>(node)) != NULL)
|
||||
refderef->expr = (Expr *)WalkAST(refderef->expr, preFunc, postFunc,
|
||||
data);
|
||||
else if ((soe = llvm::dyn_cast<SizeOfExpr>(node)) != NULL)
|
||||
soe->expr = (Expr *)WalkAST(soe->expr, preFunc, postFunc, data);
|
||||
else if ((aoe = dynamic_cast<AddressOfExpr *>(node)) != NULL)
|
||||
else if ((aoe = llvm::dyn_cast<AddressOfExpr>(node)) != NULL)
|
||||
aoe->expr = (Expr *)WalkAST(aoe->expr, preFunc, postFunc, data);
|
||||
else if (dynamic_cast<SymbolExpr *>(node) != NULL ||
|
||||
dynamic_cast<ConstExpr *>(node) != NULL ||
|
||||
dynamic_cast<FunctionSymbolExpr *>(node) != NULL ||
|
||||
dynamic_cast<SyncExpr *>(node) != NULL ||
|
||||
dynamic_cast<NullPointerExpr *>(node) != NULL) {
|
||||
// nothing to do
|
||||
else if ((newe = llvm::dyn_cast<NewExpr>(node)) != NULL) {
|
||||
newe->countExpr = (Expr *)WalkAST(newe->countExpr, preFunc,
|
||||
postFunc, data);
|
||||
newe->initExpr = (Expr *)WalkAST(newe->initExpr, preFunc,
|
||||
postFunc, data);
|
||||
}
|
||||
else
|
||||
else if (llvm::dyn_cast<SymbolExpr>(node) != NULL ||
|
||||
llvm::dyn_cast<ConstExpr>(node) != NULL ||
|
||||
llvm::dyn_cast<FunctionSymbolExpr>(node) != NULL ||
|
||||
llvm::dyn_cast<SyncExpr>(node) != NULL ||
|
||||
llvm::dyn_cast<NullPointerExpr>(node) != NULL) {
|
||||
// nothing to do
|
||||
}
|
||||
else
|
||||
FATAL("Unhandled expression type in WalkAST().");
|
||||
}
|
||||
|
||||
@@ -279,18 +327,191 @@ TypeCheck(Stmt *stmt) {
|
||||
}
|
||||
|
||||
|
||||
struct CostData {
|
||||
CostData() { cost = foreachDepth = 0; }
|
||||
|
||||
int cost;
|
||||
int foreachDepth;
|
||||
};
|
||||
|
||||
|
||||
static bool
|
||||
lCostCallback(ASTNode *node, void *c) {
|
||||
int *cost = (int *)c;
|
||||
*cost += node->EstimateCost();
|
||||
lCostCallbackPre(ASTNode *node, void *d) {
|
||||
CostData *data = (CostData *)d;
|
||||
if (llvm::dyn_cast<ForeachStmt>(node) != NULL)
|
||||
++data->foreachDepth;
|
||||
if (data->foreachDepth == 0)
|
||||
data->cost += node->EstimateCost();
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
static ASTNode *
|
||||
lCostCallbackPost(ASTNode *node, void *d) {
|
||||
CostData *data = (CostData *)d;
|
||||
if (llvm::dyn_cast<ForeachStmt>(node) != NULL)
|
||||
--data->foreachDepth;
|
||||
return node;
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
EstimateCost(ASTNode *root) {
|
||||
int cost = 0;
|
||||
WalkAST(root, lCostCallback, NULL, &cost);
|
||||
return cost;
|
||||
CostData data;
|
||||
WalkAST(root, lCostCallbackPre, lCostCallbackPost, &data);
|
||||
return data.cost;
|
||||
}
|
||||
|
||||
|
||||
/** Given an AST node, check to see if it's safe if we happen to run the
|
||||
code for that node with the execution mask all off.
|
||||
*/
|
||||
static bool
|
||||
lCheckAllOffSafety(ASTNode *node, void *data) {
|
||||
bool *okPtr = (bool *)data;
|
||||
|
||||
FunctionCallExpr *fce;
|
||||
if ((fce = llvm::dyn_cast<FunctionCallExpr>(node)) != NULL) {
|
||||
if (fce->func == NULL)
|
||||
return false;
|
||||
|
||||
const Type *type = fce->func->GetType();
|
||||
const PointerType *pt = CastType<PointerType>(type);
|
||||
if (pt != NULL)
|
||||
type = pt->GetBaseType();
|
||||
const FunctionType *ftype = CastType<FunctionType>(type);
|
||||
Assert(ftype != NULL);
|
||||
|
||||
if (ftype->isSafe == false) {
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (llvm::dyn_cast<AssertStmt>(node) != NULL) {
|
||||
// While it's fine to run the assert for varying tests, it's not
|
||||
// desirable to check an assert on a uniform variable if all of the
|
||||
// lanes are off.
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (llvm::dyn_cast<PrintStmt>(node) != NULL) {
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (llvm::dyn_cast<NewExpr>(node) != NULL ||
|
||||
llvm::dyn_cast<DeleteStmt>(node) != NULL) {
|
||||
// We definitely don't want to run the uniform variants of these if
|
||||
// the mask is all off. It's also worth skipping the overhead of
|
||||
// executing the varying versions of them in the all-off mask case.
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (llvm::dyn_cast<ForeachStmt>(node) != NULL ||
|
||||
llvm::dyn_cast<ForeachActiveStmt>(node) != NULL ||
|
||||
llvm::dyn_cast<ForeachUniqueStmt>(node) != NULL ||
|
||||
llvm::dyn_cast<UnmaskedStmt>(node) != NULL) {
|
||||
// The various foreach statements also shouldn't be run with an
|
||||
// all-off mask. Since they can re-establish an 'all on' mask,
|
||||
// this would be pretty unintuitive. (More generally, it's
|
||||
// possibly a little strange to allow foreach in the presence of
|
||||
// any non-uniform control flow...)
|
||||
//
|
||||
// Similarly, the implementation of foreach_unique assumes as a
|
||||
// precondition that the mask won't be all off going into it, so
|
||||
// we'll enforce that here...
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (llvm::dyn_cast<BinaryExpr>(node) != NULL) {
|
||||
BinaryExpr* binaryExpr = llvm::dyn_cast<BinaryExpr>(node);
|
||||
if (binaryExpr->op == BinaryExpr::Mod || binaryExpr->op == BinaryExpr::Div) {
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
IndexExpr *ie;
|
||||
if ((ie = llvm::dyn_cast<IndexExpr>(node)) != NULL && ie->baseExpr != NULL) {
|
||||
const Type *type = ie->baseExpr->GetType();
|
||||
if (type == NULL)
|
||||
return true;
|
||||
if (CastType<ReferenceType>(type) != NULL)
|
||||
type = type->GetReferenceTarget();
|
||||
|
||||
ConstExpr *ce = llvm::dyn_cast<ConstExpr>(ie->index);
|
||||
if (ce == NULL) {
|
||||
// indexing with a variable... -> not safe
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
const PointerType *pointerType = CastType<PointerType>(type);
|
||||
if (pointerType != NULL) {
|
||||
// pointer[index] -> can't be sure -> not safe
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
const SequentialType *seqType = CastType<SequentialType>(type);
|
||||
Assert(seqType != NULL);
|
||||
int nElements = seqType->GetElementCount();
|
||||
if (nElements == 0) {
|
||||
// Unsized array, so we can't be sure -> not safe
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
int32_t indices[ISPC_MAX_NVEC];
|
||||
int count = ce->GetValues(indices);
|
||||
for (int i = 0; i < count; ++i) {
|
||||
if (indices[i] < 0 || indices[i] >= nElements) {
|
||||
// Index is out of bounds -> not safe
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// All indices are in-bounds
|
||||
return true;
|
||||
}
|
||||
|
||||
MemberExpr *me;
|
||||
if ((me = llvm::dyn_cast<MemberExpr>(node)) != NULL &&
|
||||
me->dereferenceExpr) {
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (llvm::dyn_cast<PtrDerefExpr>(node) != NULL) {
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
Don't allow turning if/else to straight-line-code if we
|
||||
assign to a uniform.
|
||||
*/
|
||||
AssignExpr *ae;
|
||||
if ((ae = llvm::dyn_cast<AssignExpr>(node)) != NULL) {
|
||||
if (ae->GetType()) {
|
||||
if (ae->GetType()->IsUniformType()) {
|
||||
*okPtr = false;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
bool
|
||||
SafeToRunWithMaskAllOff(ASTNode *root) {
|
||||
bool safe = true;
|
||||
WalkAST(root, lCheckAllOffSafety, NULL, &safe);
|
||||
return safe;
|
||||
}
|
||||
|
||||
86
ast.h
86
ast.h
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2011, Intel Corporation
|
||||
Copyright (c) 2011-2013, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -28,11 +28,11 @@
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/** @file ast.h
|
||||
@brief
|
||||
@brief
|
||||
*/
|
||||
|
||||
#ifndef ISPC_AST_H
|
||||
@@ -48,8 +48,9 @@
|
||||
(Expr) and statements (Stmt) inherit from this class.
|
||||
*/
|
||||
class ASTNode {
|
||||
const unsigned char SubclassID; // Subclass identifier (for isa/dyn_cast)
|
||||
public:
|
||||
ASTNode(SourcePos p) : pos(p) { }
|
||||
ASTNode(SourcePos p, unsigned scid) : SubclassID(scid), pos(p) { }
|
||||
virtual ~ASTNode();
|
||||
|
||||
/** The Optimize() method should perform any appropriate early-stage
|
||||
@@ -74,18 +75,77 @@ public:
|
||||
/** All AST nodes must track the file position where they are
|
||||
defined. */
|
||||
SourcePos pos;
|
||||
|
||||
/** An enumeration for keeping track of the concrete subclass of Value
|
||||
that is actually instantiated.*/
|
||||
enum ASTNodeTy {
|
||||
/* For classes inherited from Expr */
|
||||
AddressOfExprID,
|
||||
AssignExprID,
|
||||
BinaryExprID,
|
||||
ConstExprID,
|
||||
DerefExprID,
|
||||
PtrDerefExprID,
|
||||
RefDerefExprID,
|
||||
ExprListID,
|
||||
FunctionCallExprID,
|
||||
FunctionSymbolExprID,
|
||||
IndexExprID,
|
||||
StructMemberExprID,
|
||||
VectorMemberExprID,
|
||||
NewExprID,
|
||||
NullPointerExprID,
|
||||
ReferenceExprID,
|
||||
SelectExprID,
|
||||
SizeOfExprID,
|
||||
SymbolExprID,
|
||||
SyncExprID,
|
||||
TypeCastExprID,
|
||||
UnaryExprID,
|
||||
/* This is a convenience separator to shorten classof implementations */
|
||||
MaxExprID,
|
||||
/* For classes inherited from Stmt */
|
||||
AssertStmtID,
|
||||
BreakStmtID,
|
||||
CaseStmtID,
|
||||
ContinueStmtID,
|
||||
DeclStmtID,
|
||||
DefaultStmtID,
|
||||
DeleteStmtID,
|
||||
DoStmtID,
|
||||
ExprStmtID,
|
||||
ForeachActiveStmtID,
|
||||
ForeachStmtID,
|
||||
ForeachUniqueStmtID,
|
||||
ForStmtID,
|
||||
GotoStmtID,
|
||||
IfStmtID,
|
||||
LabeledStmtID,
|
||||
PrintStmtID,
|
||||
ReturnStmtID,
|
||||
StmtListID,
|
||||
SwitchStmtID,
|
||||
UnmaskedStmtID
|
||||
};
|
||||
|
||||
/** Return an ID for the concrete type of this object. This is used to
|
||||
implement the classof checks. This should not be used for any
|
||||
other purpose, as the values may change as ISPC evolves */
|
||||
unsigned getValueID() const {
|
||||
return SubclassID;
|
||||
}
|
||||
|
||||
static inline bool classof(ASTNode const*) { return true; }
|
||||
};
|
||||
|
||||
|
||||
/** Simple representation of the abstract syntax trees for all of the
|
||||
functions declared in a compilation unit.
|
||||
*/
|
||||
|
||||
|
||||
class AST {
|
||||
public:
|
||||
/** Add the AST for a function described by the given declaration
|
||||
information and source code. */
|
||||
void AddFunction(Symbol *sym, const std::vector<Symbol *> &args,
|
||||
Stmt *code);
|
||||
void AddFunction(Symbol *sym, Stmt *code);
|
||||
|
||||
/** Generate LLVM IR for all of the functions into the current
|
||||
module. */
|
||||
@@ -122,12 +182,12 @@ extern ASTNode *Optimize(ASTNode *root);
|
||||
|
||||
/** Convenience version of Optimize() for Expr *s that returns an Expr *
|
||||
(rather than an ASTNode *, which would require the caller to cast back
|
||||
to an Expr *). */
|
||||
to an Expr *). */
|
||||
extern Expr *Optimize(Expr *);
|
||||
|
||||
/** Convenience version of Optimize() for Expr *s that returns an Stmt *
|
||||
(rather than an ASTNode *, which would require the caller to cast back
|
||||
to a Stmt *). */
|
||||
to a Stmt *). */
|
||||
extern Stmt *Optimize(Stmt *);
|
||||
|
||||
/** Perform type-checking on the given AST (or portion of one), returning a
|
||||
@@ -144,4 +204,8 @@ extern Stmt *TypeCheck(Stmt *);
|
||||
the given root. */
|
||||
extern int EstimateCost(ASTNode *root);
|
||||
|
||||
/** Returns true if it would be safe to run the given code with an "all
|
||||
off" mask. */
|
||||
extern bool SafeToRunWithMaskAllOff(ASTNode *root);
|
||||
|
||||
#endif // ISPC_AST_H
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
import sys
|
||||
import string
|
||||
import re
|
||||
import subprocess
|
||||
import platform
|
||||
@@ -10,6 +9,8 @@ import os
|
||||
length=0
|
||||
|
||||
src=str(sys.argv[1])
|
||||
if (len(sys.argv) > 2):
|
||||
runtime=str(sys.argv[2])
|
||||
|
||||
target = re.sub("builtins/target-", "", src)
|
||||
target = re.sub(r"builtins\\target-", "", target)
|
||||
@@ -20,23 +21,30 @@ target = re.sub("\.c$", "", target)
|
||||
target = re.sub("-", "_", target)
|
||||
|
||||
llvm_as="llvm-as"
|
||||
if platform.system() == 'Windows' or string.find(platform.system(), "CYGWIN_NT") != -1:
|
||||
if platform.system() == 'Windows' or platform.system().find("CYGWIN_NT") != -1:
|
||||
llvm_as = os.getenv("LLVM_INSTALL_DIR").replace("\\", "/") + "/bin/" + llvm_as
|
||||
|
||||
try:
|
||||
as_out=subprocess.Popen([llvm_as, "-", "-o", "-"], stdout=subprocess.PIPE)
|
||||
except IOError:
|
||||
print >> sys.stderr, "Couldn't open " + src
|
||||
sys.stderr.write("Couldn't open " + src)
|
||||
sys.exit(1)
|
||||
|
||||
print "unsigned char builtins_bitcode_" + target + "[] = {"
|
||||
for line in as_out.stdout.readlines():
|
||||
length = length + len(line)
|
||||
for c in line:
|
||||
print ord(c)
|
||||
print ", "
|
||||
print " 0 };\n\n"
|
||||
print "int builtins_bitcode_" + target + "_length = " + str(length) + ";\n"
|
||||
name = target
|
||||
if (len(sys.argv) > 2):
|
||||
name += "_" + runtime;
|
||||
width = 16;
|
||||
sys.stdout.write("unsigned char builtins_bitcode_" + name + "[] = {\n")
|
||||
|
||||
data = as_out.stdout.read()
|
||||
for i in range(0, len(data), 1):
|
||||
sys.stdout.write("0x%0.2X, " % ord(data[i:i+1]))
|
||||
|
||||
if i%width == (width-1):
|
||||
sys.stdout.write("\n")
|
||||
|
||||
sys.stdout.write("0x00 };\n\n")
|
||||
sys.stdout.write("int builtins_bitcode_" + name + "_length = " + str(len(data)) + ";\n")
|
||||
|
||||
as_out.wait()
|
||||
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
|
||||
REM If LLVM_INSTALL_DIR isn't set globally in your environment,
|
||||
REM it can be set here_
|
||||
set LLVM_INSTALL_DIR=c:\users\mmp\llvm-dev
|
||||
set LLVM_VERSION=3.1svn
|
||||
REM set LLVM_INSTALL_DIR=c:\users\mmp\llvm-dev
|
||||
REM set LLVM_VERSION=LLVM_3_2
|
||||
|
||||
REM Both the LLVM binaries and python need to be in the path
|
||||
set path=%LLVM_INSTALL_DIR%\bin;%PATH%;c:\cygwin\bin
|
||||
|
||||
1078
builtins.cpp
1078
builtins.cpp
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2010-2011, Intel Corporation
|
||||
Copyright (c) 2010-2015, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -28,11 +28,11 @@
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/** @file builtins.h
|
||||
@brief Declarations of functions related to builtins and the
|
||||
@brief Declarations of functions related to builtins and the
|
||||
standard library
|
||||
*/
|
||||
|
||||
@@ -56,6 +56,7 @@ void DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module
|
||||
bool includeStdlib);
|
||||
|
||||
void AddBitcodeToModule(const unsigned char *bitcode, int length,
|
||||
llvm::Module *module, SymbolTable *symbolTable = NULL);
|
||||
llvm::Module *module, SymbolTable *symbolTable = NULL,
|
||||
bool warn = true);
|
||||
|
||||
#endif // ISPC_STDLIB_H
|
||||
|
||||
163
builtins/__do_print_nvptx.cu
Normal file
163
builtins/__do_print_nvptx.cu
Normal file
@@ -0,0 +1,163 @@
|
||||
/*
|
||||
Copyright (c) 2014-2015, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
|
||||
#define PRINT_BUF_SIZE 4096
|
||||
#define uint64_t unsigned long long
|
||||
|
||||
static __device__ size_t d_strlen(const char *str)
|
||||
{
|
||||
const char *s;
|
||||
|
||||
for (s = str; *s; ++s)
|
||||
;
|
||||
return (s - str);
|
||||
}
|
||||
|
||||
static __device__ char* d_strncat(char *dest, const char *src, size_t n)
|
||||
{
|
||||
size_t dest_len = d_strlen(dest);
|
||||
size_t i;
|
||||
|
||||
for (i = 0 ; i < n && src[i] != '\0' ; i++)
|
||||
dest[dest_len + i] = src[i];
|
||||
dest[dest_len + i] = '\0';
|
||||
|
||||
return dest;
|
||||
}
|
||||
|
||||
#define APPEND(str) \
|
||||
do { \
|
||||
int offset = bufp - &printString[0]; \
|
||||
*bufp = '\0'; \
|
||||
d_strncat(bufp, str, PRINT_BUF_SIZE-offset); \
|
||||
bufp += d_strlen(str); \
|
||||
if (bufp >= &printString[PRINT_BUF_SIZE]) \
|
||||
goto done; \
|
||||
} while (0) /* eat semicolon */
|
||||
|
||||
|
||||
#define PRINT_SCALAR(fmt, type) \
|
||||
sprintf(tmpBuf, fmt, *((type *)ptr)); \
|
||||
APPEND(tmpBuf); \
|
||||
break
|
||||
|
||||
#define PRINT_VECTOR(fmt, type) \
|
||||
*bufp++ = '['; \
|
||||
if (bufp == &printString[PRINT_BUF_SIZE]) break; \
|
||||
for (int i = 0; i < width; ++i) { \
|
||||
/* only print the value if the current lane is executing */ \
|
||||
type val0 = *((type*)ptr); \
|
||||
type val = val0; \
|
||||
if (mask & (1ull<<i)) \
|
||||
sprintf(tmpBuf, fmt, val); \
|
||||
else \
|
||||
sprintf(tmpBuf, "(( * )) "); \
|
||||
APPEND(tmpBuf); \
|
||||
*bufp++ = (i != width-1 ? ',' : ']'); \
|
||||
} \
|
||||
break
|
||||
|
||||
extern "C"
|
||||
__device__ void __do_print_nvptx(const char *format, const char *types, int width, uint64_t mask,
|
||||
void **args) {
|
||||
char printString[PRINT_BUF_SIZE+1]; // +1 for trailing NUL
|
||||
char *bufp = &printString[0];
|
||||
char tmpBuf[256];
|
||||
const char trueBuf[] = "true";
|
||||
const char falseBuf[] = "false";
|
||||
|
||||
int argCount = 0;
|
||||
while (*format && bufp < &printString[PRINT_BUF_SIZE]) {
|
||||
// Format strings are just single percent signs.
|
||||
if (*format != '%') {
|
||||
*bufp++ = *format;
|
||||
}
|
||||
else {
|
||||
if (*types) {
|
||||
void *ptr = args[argCount++];
|
||||
// Based on the encoding in the types string, cast the
|
||||
// value appropriately and print it with a reasonable
|
||||
// printf() formatting string.
|
||||
switch (*types) {
|
||||
case 'b': {
|
||||
const char *tmpBuf1 = *((bool *)ptr) ? trueBuf : falseBuf;
|
||||
APPEND(tmpBuf1);
|
||||
break;
|
||||
}
|
||||
case 'B': {
|
||||
*bufp++ = '[';
|
||||
if (bufp == &printString[PRINT_BUF_SIZE])
|
||||
break;
|
||||
for (int i = 0; i < width; ++i) {
|
||||
bool val0 = *((bool*)ptr);
|
||||
bool val = val0; \
|
||||
if (mask & (1ull << i)) {
|
||||
const char *tmpBuf1 = val ? trueBuf : falseBuf;
|
||||
APPEND(tmpBuf1);
|
||||
}
|
||||
else
|
||||
APPEND("_________");
|
||||
*bufp++ = (i != width-1) ? ',' : ']';
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 'i': PRINT_SCALAR("%d", int);
|
||||
case 'I': PRINT_VECTOR("%d", int);
|
||||
case 'u': PRINT_SCALAR("%u", unsigned int);
|
||||
case 'U': PRINT_VECTOR("%u", unsigned int);
|
||||
case 'f': PRINT_SCALAR("%f", float);
|
||||
case 'F': PRINT_VECTOR("%f", float);
|
||||
case 'l': PRINT_SCALAR("%lld", long long);
|
||||
case 'L': PRINT_VECTOR("%lld", long long);
|
||||
case 'v': PRINT_SCALAR("%llu", unsigned long long);
|
||||
case 'V': PRINT_VECTOR("%llu", unsigned long long);
|
||||
case 'd': PRINT_SCALAR("%f", double);
|
||||
case 'D': PRINT_VECTOR("%f", double);
|
||||
case 'p': PRINT_SCALAR("%p", void *);
|
||||
case 'P': PRINT_VECTOR("%p", void *);
|
||||
default:
|
||||
APPEND("UNKNOWN TYPE ");
|
||||
*bufp++ = *types;
|
||||
}
|
||||
++types;
|
||||
}
|
||||
}
|
||||
++format;
|
||||
}
|
||||
|
||||
done:
|
||||
*bufp = '\n'; bufp++;
|
||||
*bufp = '\0';
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2010-2011, Intel Corporation
|
||||
Copyright (c) 2010-2013, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -28,7 +28,7 @@
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/** @file builtins-c.c
|
||||
@@ -50,6 +50,16 @@
|
||||
available to ispc programs at compile time automatically.
|
||||
*/
|
||||
|
||||
#ifdef _MSC_VER
|
||||
// We do want old school sprintf and don't want secure Microsoft extensions.
|
||||
// And we also don't want warnings about it, so the define.
|
||||
#define _CRT_SECURE_NO_WARNINGS
|
||||
#else
|
||||
// Some versions of glibc has "fortification" feature, which expands sprintf
|
||||
// to __builtin___sprintf_chk(..., __builtin_object_size(...), ...).
|
||||
// We don't want this kind of expansion, as we don't support these intrinsics.
|
||||
#define _FORTIFY_SOURCE 0
|
||||
#endif
|
||||
|
||||
#ifndef _MSC_VER
|
||||
#include <unistd.h>
|
||||
@@ -59,22 +69,39 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdarg.h>
|
||||
#include <string.h>
|
||||
|
||||
typedef int Bool;
|
||||
|
||||
#define PRINT_SCALAR(fmt, type) \
|
||||
printf(fmt, *((type *)ptr)); \
|
||||
#define PRINT_BUF_SIZE 4096
|
||||
|
||||
#define APPEND(str) \
|
||||
do { \
|
||||
int offset = bufp - &printString[0]; \
|
||||
*bufp = '\0'; \
|
||||
strncat(bufp, str, PRINT_BUF_SIZE-offset); \
|
||||
bufp += strlen(str); \
|
||||
if (bufp >= &printString[PRINT_BUF_SIZE]) \
|
||||
goto done; \
|
||||
} while (0) /* eat semicolon */
|
||||
|
||||
|
||||
#define PRINT_SCALAR(fmt, type) \
|
||||
sprintf(tmpBuf, fmt, *((type *)ptr)); \
|
||||
APPEND(tmpBuf); \
|
||||
break
|
||||
|
||||
#define PRINT_VECTOR(fmt, type) \
|
||||
putchar('['); \
|
||||
*bufp++ = '['; \
|
||||
if (bufp == &printString[PRINT_BUF_SIZE]) break; \
|
||||
for (int i = 0; i < width; ++i) { \
|
||||
/* only print the value if the current lane is executing */ \
|
||||
if (mask & (1<<i)) \
|
||||
printf(fmt, ((type *)ptr)[i]); \
|
||||
if (mask & (1ull<<i)) \
|
||||
sprintf(tmpBuf, fmt, ((type *)ptr)[i]); \
|
||||
else \
|
||||
printf("((" fmt "))", ((type *)ptr)[i]); \
|
||||
putchar(i != width-1 ? ',' : ']'); \
|
||||
sprintf(tmpBuf, "((" fmt "))", ((type *)ptr)[i]); \
|
||||
APPEND(tmpBuf); \
|
||||
*bufp++ = (i != width-1 ? ',' : ']'); \
|
||||
} \
|
||||
break
|
||||
|
||||
@@ -84,21 +111,23 @@ typedef int Bool;
|
||||
|
||||
@param format Print format string
|
||||
@param types Encoded types of the values being printed.
|
||||
(See lEncodeType()).
|
||||
(See lEncodeType()).
|
||||
@param width Vector width of the compilation target
|
||||
@param mask Current lane mask when the print statemnt is called
|
||||
@param args Array of pointers to the values to be printed
|
||||
*/
|
||||
void __do_print(const char *format, const char *types, int width, int mask,
|
||||
void __do_print(const char *format, const char *types, int width, uint64_t mask,
|
||||
void **args) {
|
||||
if (mask == 0)
|
||||
return;
|
||||
char printString[PRINT_BUF_SIZE+1]; // +1 for trailing NUL
|
||||
char *bufp = &printString[0];
|
||||
char tmpBuf[256];
|
||||
|
||||
int argCount = 0;
|
||||
while (*format) {
|
||||
while (*format && bufp < &printString[PRINT_BUF_SIZE]) {
|
||||
// Format strings are just single percent signs.
|
||||
if (*format != '%')
|
||||
putchar(*format);
|
||||
if (*format != '%') {
|
||||
*bufp++ = *format;
|
||||
}
|
||||
else {
|
||||
if (*types) {
|
||||
void *ptr = args[argCount++];
|
||||
@@ -107,17 +136,22 @@ void __do_print(const char *format, const char *types, int width, int mask,
|
||||
// printf() formatting string.
|
||||
switch (*types) {
|
||||
case 'b': {
|
||||
printf("%s", *((Bool *)ptr) ? "true" : "false");
|
||||
sprintf(tmpBuf, "%s", *((Bool *)ptr) ? "true" : "false");
|
||||
APPEND(tmpBuf);
|
||||
break;
|
||||
}
|
||||
case 'B': {
|
||||
putchar('[');
|
||||
*bufp++ = '[';
|
||||
if (bufp == &printString[PRINT_BUF_SIZE])
|
||||
break;
|
||||
for (int i = 0; i < width; ++i) {
|
||||
if (mask & (1<<i))
|
||||
printf("%s", ((Bool *)ptr)[i] ? "true" : "false");
|
||||
if (mask & (1ull << i)) {
|
||||
sprintf(tmpBuf, "%s", ((Bool *)ptr)[i] ? "true" : "false");
|
||||
APPEND(tmpBuf);
|
||||
}
|
||||
else
|
||||
printf("_________");
|
||||
putchar(i != width-1 ? ',' : ']');
|
||||
APPEND("_________");
|
||||
*bufp++ = (i != width-1) ? ',' : ']';
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -136,17 +170,96 @@ void __do_print(const char *format, const char *types, int width, int mask,
|
||||
case 'p': PRINT_SCALAR("%p", void *);
|
||||
case 'P': PRINT_VECTOR("%p", void *);
|
||||
default:
|
||||
printf("UNKNOWN TYPE ");
|
||||
putchar(*types);
|
||||
APPEND("UNKNOWN TYPE ");
|
||||
*bufp++ = *types;
|
||||
}
|
||||
++types;
|
||||
}
|
||||
}
|
||||
++format;
|
||||
}
|
||||
|
||||
done:
|
||||
*bufp = '\0';
|
||||
fputs(printString, stdout);
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
/* this is print for PTX target only */
|
||||
int __puts_nvptx(const char *);
|
||||
void __do_print_nvptx(const char *format, const char *types, int width, uint64_t mask,
|
||||
void **args) {
|
||||
#if 0
|
||||
char printString[PRINT_BUF_SIZE+1]; // +1 for trailing NUL
|
||||
char *bufp = &printString[0];
|
||||
char tmpBuf[256];
|
||||
|
||||
int argCount = 0;
|
||||
while (*format && bufp < &printString[PRINT_BUF_SIZE]) {
|
||||
// Format strings are just single percent signs.
|
||||
if (*format != '%') {
|
||||
*bufp++ = *format;
|
||||
}
|
||||
else {
|
||||
if (*types) {
|
||||
void *ptr = args[argCount++];
|
||||
// Based on the encoding in the types string, cast the
|
||||
// value appropriately and print it with a reasonable
|
||||
// printf() formatting string.
|
||||
switch (*types) {
|
||||
case 'b': {
|
||||
sprintf(tmpBuf, "%s", *((Bool *)ptr) ? "true" : "false");
|
||||
APPEND(tmpBuf);
|
||||
break;
|
||||
}
|
||||
case 'B': {
|
||||
*bufp++ = '[';
|
||||
if (bufp == &printString[PRINT_BUF_SIZE])
|
||||
break;
|
||||
for (int i = 0; i < width; ++i) {
|
||||
if (mask & (1ull << i)) {
|
||||
sprintf(tmpBuf, "%s", ((Bool *)ptr)[i] ? "true" : "false");
|
||||
APPEND(tmpBuf);
|
||||
}
|
||||
else
|
||||
APPEND("_________");
|
||||
*bufp++ = (i != width-1) ? ',' : ']';
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 'i': PRINT_SCALAR("%d", int);
|
||||
case 'I': PRINT_VECTOR("%d", int);
|
||||
case 'u': PRINT_SCALAR("%u", unsigned int);
|
||||
case 'U': PRINT_VECTOR("%u", unsigned int);
|
||||
case 'f': PRINT_SCALAR("%f", float);
|
||||
case 'F': PRINT_VECTOR("%f", float);
|
||||
case 'l': PRINT_SCALAR("%lld", long long);
|
||||
case 'L': PRINT_VECTOR("%lld", long long);
|
||||
case 'v': PRINT_SCALAR("%llu", unsigned long long);
|
||||
case 'V': PRINT_VECTOR("%llu", unsigned long long);
|
||||
case 'd': PRINT_SCALAR("%f", double);
|
||||
case 'D': PRINT_VECTOR("%f", double);
|
||||
case 'p': PRINT_SCALAR("%p", void *);
|
||||
case 'P': PRINT_VECTOR("%p", void *);
|
||||
default:
|
||||
APPEND("UNKNOWN TYPE ");
|
||||
*bufp++ = *types;
|
||||
}
|
||||
++types;
|
||||
}
|
||||
}
|
||||
++format;
|
||||
}
|
||||
|
||||
done:
|
||||
*bufp = '\n'; bufp++;
|
||||
*bufp = '\0';
|
||||
__puts_nvptx(printString);
|
||||
#else
|
||||
__puts_nvptx("---nvptx printing is not support---\n");
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
int __num_cores() {
|
||||
#if defined(_MSC_VER) || defined(__MINGW32__)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2011, Intel Corporation
|
||||
;; Copyright (c) 2011-2016, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -41,30 +41,97 @@
|
||||
|
||||
@__system_best_isa = internal global i32 -1
|
||||
|
||||
declare void @abort() noreturn
|
||||
|
||||
;; The below is the result of running "clang -O2 -emit-llvm -c -o -" on the
|
||||
;; following code... Specifically, __get_system_isa should return a value
|
||||
;; corresponding to one of the Target::ISA enumerant values that gives the
|
||||
;; most capable ISA that the curremt system can run.
|
||||
;;
|
||||
;; #ifdef _MSC_VER
|
||||
;; extern void __stdcall __cpuid(int info[4], int infoType);
|
||||
;; #else
|
||||
;;
|
||||
;; #include <stdint.h>
|
||||
;; #include <stdlib.h>
|
||||
;;
|
||||
;; static void __cpuid(int info[4], int infoType) {
|
||||
;; __asm__ __volatile__ ("cpuid"
|
||||
;; : "=a" (info[0]), "=b" (info[1]), "=c" (info[2]), "=d" (info[3])
|
||||
;; : "0" (infoType));
|
||||
;; }
|
||||
;; #endif
|
||||
;;
|
||||
;; // Save %ebx in case it's the PIC register.
|
||||
;; static void __cpuid_count(int info[4], int level, int count) {
|
||||
;; __asm__ __volatile__ ("xchg{l}\t{%%}ebx, %1\n\t"
|
||||
;; "cpuid\n\t"
|
||||
;; "xchg{l}\t{%%}ebx, %1\n\t"
|
||||
;; : "=a" (info[0]), "=r" (info[1]), "=c" (info[2]), "=d" (info[3])
|
||||
;; : "0" (level), "2" (count));
|
||||
;; }
|
||||
;;
|
||||
;; static int __os_has_avx_support() {
|
||||
;; // Check xgetbv; this uses a .byte sequence instead of the instruction
|
||||
;; // directly because older assemblers do not include support for xgetbv and
|
||||
;; // there is no easy way to conditionally compile based on the assembler used.
|
||||
;; int rEAX, rEDX;
|
||||
;; __asm__ __volatile__ (".byte 0x0f, 0x01, 0xd0" : "=a" (rEAX), "=d" (rEDX) : "c" (0));
|
||||
;; return (rEAX & 6) == 6;
|
||||
;; }
|
||||
;;
|
||||
;; static int __os_has_avx512_support() {
|
||||
;; // Check if the OS saves the XMM, YMM and ZMM registers, i.e. it supports AVX2 and AVX512.
|
||||
;; // See section 2.1 of software.intel.com/sites/default/files/managed/0d/53/319433-022.pdf
|
||||
;; // Check xgetbv; this uses a .byte sequence instead of the instruction
|
||||
;; // directly because older assemblers do not include support for xgetbv and
|
||||
;; // there is no easy way to conditionally compile based on the assembler used.
|
||||
;; int rEAX, rEDX;
|
||||
;; __asm__ __volatile__ (".byte 0x0f, 0x01, 0xd0" : "=a" (rEAX), "=d" (rEDX) : "c" (0));
|
||||
;; return (rEAX & 0xE6) == 0xE6;
|
||||
;; }
|
||||
;;
|
||||
;; int32_t __get_system_isa() {
|
||||
;; int info[4];
|
||||
;; __cpuid(info, 1);
|
||||
;; /* NOTE: the values returned below must be the same as the
|
||||
;; corresponding enumerant values in Target::ISA. */
|
||||
;; if ((info[2] & (1 << 28)) != 0)
|
||||
;; return 2; // AVX
|
||||
;;
|
||||
;; // Call cpuid with eax=7, ecx=0
|
||||
;; int info2[4];
|
||||
;; __cpuid_count(info2, 7, 0);
|
||||
;;
|
||||
;; // NOTE: the values returned below must be the same as the
|
||||
;; // corresponding enumerant values in Target::ISA.
|
||||
;; if ((info[2] & (1 << 27)) != 0 && // OSXSAVE
|
||||
;; (info2[1] & (1 << 5)) != 0 && // AVX2
|
||||
;; (info2[1] & (1 << 16)) != 0 && // AVX512 F
|
||||
;; __os_has_avx512_support()) {
|
||||
;; // We need to verify that AVX2 is also available,
|
||||
;; // as well as AVX512, because our targets are supposed
|
||||
;; // to use both.
|
||||
;;
|
||||
;; if ((info2[1] & (1 << 17)) != 0 && // AVX512 DQ
|
||||
;; (info2[1] & (1 << 28)) != 0 && // AVX512 CDI
|
||||
;; (info2[1] & (1 << 30)) != 0 && // AVX512 BW
|
||||
;; (info2[1] & (1 << 31)) != 0) { // AVX512 VL
|
||||
;; return 6; // SKX
|
||||
;; }
|
||||
;; else if ((info2[1] & (1 << 26)) != 0 && // AVX512 PF
|
||||
;; (info2[1] & (1 << 27)) != 0 && // AVX512 ER
|
||||
;; (info2[1] & (1 << 28)) != 0) { // AVX512 CDI
|
||||
;; return 5; // KNL_AVX512
|
||||
;; }
|
||||
;; // If it's unknown AVX512 target, fall through and use AVX2
|
||||
;; // or whatever is available in the machine.
|
||||
;; }
|
||||
;;
|
||||
;; if ((info[2] & (1 << 27)) != 0 && // OSXSAVE
|
||||
;; (info[2] & (1 << 28)) != 0 &&
|
||||
;; __os_has_avx_support()) {
|
||||
;; if ((info[2] & (1 << 29)) != 0 && // F16C
|
||||
;; (info[2] & (1 << 30)) != 0) { // RDRAND
|
||||
;; // So far, so good. AVX2?
|
||||
;; if ((info2[1] & (1 << 5)) != 0)
|
||||
;; return 4;
|
||||
;; else
|
||||
;; return 3;
|
||||
;; }
|
||||
;; // Regular AVX
|
||||
;; return 2;
|
||||
;; }
|
||||
;; else if ((info[2] & (1 << 19)) != 0)
|
||||
;; return 1; // SSE4
|
||||
;; else if ((info[3] & (1 << 26)) != 0)
|
||||
@@ -73,42 +140,112 @@ declare void @abort() noreturn
|
||||
;; abort();
|
||||
;; }
|
||||
|
||||
%0 = type { i32, i32, i32, i32 }
|
||||
|
||||
define i32 @__get_system_isa() nounwind ssp {
|
||||
%1 = tail call %0 asm sideeffect "cpuid", "={ax},={bx},={cx},={dx},0,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
|
||||
%2 = extractvalue %0 %1, 2
|
||||
%3 = extractvalue %0 %1, 3
|
||||
%4 = and i32 %2, 268435456
|
||||
%5 = icmp eq i32 %4, 0
|
||||
br i1 %5, label %6, label %13
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
; <label>:6 ; preds = %0
|
||||
%7 = and i32 %2, 524288
|
||||
%8 = icmp eq i32 %7, 0
|
||||
br i1 %8, label %9, label %13
|
||||
;; LLVM has different IR for different versions since 3.7
|
||||
|
||||
; <label>:9 ; preds = %6
|
||||
%10 = and i32 %3, 67108864
|
||||
%11 = icmp eq i32 %10, 0
|
||||
br i1 %11, label %12, label %13
|
||||
define(`PTR_OP_ARGS',
|
||||
ifelse(LLVM_VERSION, LLVM_3_7,
|
||||
``$1 , $1 *'',
|
||||
LLVM_VERSION, LLVM_3_8,
|
||||
``$1 , $1 *'',
|
||||
LLVM_VERSION, LLVM_3_9,
|
||||
``$1 , $1 *'',
|
||||
LLVM_VERSION, LLVM_4_0,
|
||||
``$1 , $1 *'',
|
||||
LLVM_VERSION, LLVM_5_0,
|
||||
``$1 , $1 *'',
|
||||
``$1 *''
|
||||
)
|
||||
)
|
||||
|
||||
; <label>:12 ; preds = %9
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
define i32 @__get_system_isa() nounwind uwtable {
|
||||
entry:
|
||||
%0 = tail call { i32, i32, i32, i32 } asm sideeffect "cpuid", "={ax},={bx},={cx},={dx},0,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
|
||||
%asmresult5.i = extractvalue { i32, i32, i32, i32 } %0, 2
|
||||
%asmresult6.i = extractvalue { i32, i32, i32, i32 } %0, 3
|
||||
%1 = tail call { i32, i32, i32, i32 } asm sideeffect "xchg$(l$)\09$(%$)ebx, $1\0A\09cpuid\0A\09xchg$(l$)\09$(%$)ebx, $1\0A\09", "={ax},=r,={cx},={dx},0,2,~{dirflag},~{fpsr},~{flags}"(i32 7, i32 0) nounwind
|
||||
%asmresult4.i87 = extractvalue { i32, i32, i32, i32 } %1, 1
|
||||
%and = and i32 %asmresult5.i, 134217728
|
||||
%cmp = icmp eq i32 %and, 0
|
||||
br i1 %cmp, label %if.else65, label %land.lhs.true
|
||||
|
||||
land.lhs.true: ; preds = %entry
|
||||
%2 = and i32 %asmresult4.i87, 65568
|
||||
%3 = icmp eq i32 %2, 65568
|
||||
br i1 %3, label %land.lhs.true9, label %if.end39
|
||||
|
||||
land.lhs.true9: ; preds = %land.lhs.true
|
||||
%4 = tail call { i32, i32 } asm sideeffect ".byte 0x0f, 0x01, 0xd0", "={ax},={dx},{cx},~{dirflag},~{fpsr},~{flags}"(i32 0) nounwind
|
||||
%asmresult.i90 = extractvalue { i32, i32 } %4, 0
|
||||
%and.i = and i32 %asmresult.i90, 230
|
||||
%cmp.i = icmp eq i32 %and.i, 230
|
||||
br i1 %cmp.i, label %if.then, label %if.end39
|
||||
|
||||
if.then: ; preds = %land.lhs.true9
|
||||
%5 = and i32 %asmresult4.i87, -805175296
|
||||
%6 = icmp eq i32 %5, -805175296
|
||||
br i1 %6, label %return, label %if.else
|
||||
|
||||
if.else: ; preds = %if.then
|
||||
%7 = and i32 %asmresult4.i87, 469762048
|
||||
%8 = icmp eq i32 %7, 469762048
|
||||
br i1 %8, label %return, label %if.end39
|
||||
|
||||
if.end39: ; preds = %if.else, %land.lhs.true9, %land.lhs.true
|
||||
%9 = and i32 %asmresult5.i, 402653184
|
||||
%10 = icmp eq i32 %9, 402653184
|
||||
br i1 %10, label %land.lhs.true47, label %if.else65
|
||||
|
||||
land.lhs.true47: ; preds = %if.end39
|
||||
%11 = tail call { i32, i32 } asm sideeffect ".byte 0x0f, 0x01, 0xd0", "={ax},={dx},{cx},~{dirflag},~{fpsr},~{flags}"(i32 0) nounwind
|
||||
%asmresult.i91 = extractvalue { i32, i32 } %11, 0
|
||||
%and.i92 = and i32 %asmresult.i91, 6
|
||||
%cmp.i93 = icmp eq i32 %and.i92, 6
|
||||
br i1 %cmp.i93, label %if.then50, label %if.else65
|
||||
|
||||
if.then50: ; preds = %land.lhs.true47
|
||||
%12 = and i32 %asmresult5.i, 1610612736
|
||||
%13 = icmp eq i32 %12, 1610612736
|
||||
br i1 %13, label %if.then58, label %return
|
||||
|
||||
if.then58: ; preds = %if.then50
|
||||
%and60 = lshr i32 %asmresult4.i87, 5
|
||||
%14 = and i32 %and60, 1
|
||||
%15 = add i32 %14, 3
|
||||
br label %return
|
||||
|
||||
if.else65: ; preds = %land.lhs.true47, %if.end39, %entry
|
||||
%and67 = and i32 %asmresult5.i, 524288
|
||||
%cmp68 = icmp eq i32 %and67, 0
|
||||
br i1 %cmp68, label %if.else70, label %return
|
||||
|
||||
if.else70: ; preds = %if.else65
|
||||
%and72 = and i32 %asmresult6.i, 67108864
|
||||
%cmp73 = icmp eq i32 %and72, 0
|
||||
br i1 %cmp73, label %if.else75, label %return
|
||||
|
||||
if.else75: ; preds = %if.else70
|
||||
tail call void @abort() noreturn nounwind
|
||||
unreachable
|
||||
|
||||
; <label>:13 ; preds = %9, %6, %0
|
||||
%.0 = phi i32 [ 2, %0 ], [ 1, %6 ], [ 0, %9 ]
|
||||
ret i32 %.0
|
||||
return: ; preds = %if.else70, %if.else65, %if.then58, %if.then50, %if.else, %if.then
|
||||
%retval.0 = phi i32 [ 6, %if.then ], [ 5, %if.else ], [ %15, %if.then58 ], [ 2, %if.then50 ], [ 1, %if.else65 ], [ 0, %if.else70 ]
|
||||
ret i32 %retval.0
|
||||
}
|
||||
|
||||
declare void @abort() noreturn nounwind
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; This function is called by each of the dispatch functions we generate;
|
||||
;; it sets @__system_best_isa if it is unset.
|
||||
|
||||
define void @__set_system_isa() {
|
||||
entry:
|
||||
%bi = load i32* @__system_best_isa
|
||||
%bi = load PTR_OP_ARGS(`i32 ') @__system_best_isa
|
||||
%unset = icmp eq i32 %bi, -1
|
||||
br i1 %unset, label %set_system_isa, label %done
|
||||
|
||||
|
||||
216
builtins/svml.m4
Normal file
216
builtins/svml.m4
Normal file
@@ -0,0 +1,216 @@
|
||||
;; Copyright (c) 2013-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
;; svml macro
|
||||
|
||||
;; svml_stubs : stubs for svml calls
|
||||
;; $1 - type ("float" or "double")
|
||||
;; $2 - svml internal function suffix ("f" for float, "d" for double)
|
||||
;; $3 - vector width
|
||||
define(`svml_stubs',`
|
||||
declare <$3 x $1> @__svml_sin$2(<$3 x $1>) nounwind readnone alwaysinline
|
||||
declare <$3 x $1> @__svml_asin$2(<$3 x $1>) nounwind readnone alwaysinline
|
||||
declare <$3 x $1> @__svml_cos$2(<$3 x $1>) nounwind readnone alwaysinline
|
||||
declare void @__svml_sincos$2(<$3 x $1>, <$3 x $1> *, <$3 x $1> *) nounwind alwaysinline
|
||||
declare <$3 x $1> @__svml_tan$2(<$3 x $1>) nounwind readnone alwaysinline
|
||||
declare <$3 x $1> @__svml_atan$2(<$3 x $1>) nounwind readnone alwaysinline
|
||||
declare <$3 x $1> @__svml_atan2$2(<$3 x $1>, <$3 x $1>) nounwind readnone alwaysinline
|
||||
declare <$3 x $1> @__svml_exp$2(<$3 x $1>) nounwind readnone alwaysinline
|
||||
declare <$3 x $1> @__svml_log$2(<$3 x $1>) nounwind readnone alwaysinline
|
||||
declare <$3 x $1> @__svml_pow$2(<$3 x $1>, <$3 x $1>) nounwind readnone alwaysinline
|
||||
')
|
||||
|
||||
;; svml_declare : declaration of __svml_* intrinsics
|
||||
;; $1 - type ("float" or "double")
|
||||
;; $2 - __svml_* intrinsic function suffix
|
||||
;; float: "f4"(sse) "f8"(avx) "f16"(avx512)
|
||||
;; double: "2"(sse) "4"(avx) "8"(avx512)
|
||||
;; $3 - vector width
|
||||
define(`svml_declare',`
|
||||
declare <$3 x $1> @__svml_sin$2(<$3 x $1>) nounwind readnone
|
||||
declare <$3 x $1> @__svml_asin$2(<$3 x $1>) nounwind readnone
|
||||
declare <$3 x $1> @__svml_cos$2(<$3 x $1>) nounwind readnone
|
||||
declare <$3 x $1> @__svml_sincos$2(<$3 x $1> *, <$3 x $1>) nounwind readnone
|
||||
declare <$3 x $1> @__svml_tan$2(<$3 x $1>) nounwind readnone
|
||||
declare <$3 x $1> @__svml_atan$2(<$3 x $1>) nounwind readnone
|
||||
declare <$3 x $1> @__svml_atan2$2(<$3 x $1>, <$3 x $1>) nounwind readnone
|
||||
declare <$3 x $1> @__svml_exp$2(<$3 x $1>) nounwind readnone
|
||||
declare <$3 x $1> @__svml_log$2(<$3 x $1>) nounwind readnone
|
||||
declare <$3 x $1> @__svml_pow$2(<$3 x $1>, <$3 x $1>) nounwind readnone
|
||||
');
|
||||
|
||||
;; defintition of __svml_* internal functions
|
||||
;; $1 - type ("float" or "double")
|
||||
;; $2 - __svml_* intrinsic function suffix
|
||||
;; float: "f4"(sse) "f8"(avx) "f16"(avx512)
|
||||
;; double: "2"(sse) "4"(avx) "8"(avx512)
|
||||
;; $3 - vector width
|
||||
;; $4 - svml internal function suffix ("f" for float, "d" for double)
|
||||
define(`svml_define',`
|
||||
define <$3 x $1> @__svml_sin$4(<$3 x $1>) nounwind readnone alwaysinline {
|
||||
%ret = call <$3 x $1> @__svml_sin$2(<$3 x $1> %0)
|
||||
ret <$3 x $1> %ret
|
||||
}
|
||||
define <$3 x $1> @__svml_asin$4(<$3 x $1>) nounwind readnone alwaysinline {
|
||||
%ret = call <$3 x $1> @__svml_asin$2(<$3 x $1> %0)
|
||||
ret <$3 x $1> %ret
|
||||
}
|
||||
|
||||
define <$3 x $1> @__svml_cos$4(<$3 x $1>) nounwind readnone alwaysinline {
|
||||
%ret = call <$3 x $1> @__svml_cos$2(<$3 x $1> %0)
|
||||
ret <$3 x $1> %ret
|
||||
}
|
||||
|
||||
define void @__svml_sincos$4(<$3 x $1>, <$3 x $1> *, <$3 x $1> *) nounwind alwaysinline {
|
||||
%s = call <$3 x $1> @__svml_sincos$2(<$3 x $1> * %2, <$3 x $1> %0)
|
||||
store <$3 x $1> %s, <$3 x $1> * %1
|
||||
ret void
|
||||
}
|
||||
|
||||
define <$3 x $1> @__svml_tan$4(<$3 x $1>) nounwind readnone alwaysinline {
|
||||
%ret = call <$3 x $1> @__svml_tan$2(<$3 x $1> %0)
|
||||
ret <$3 x $1> %ret
|
||||
}
|
||||
|
||||
define <$3 x $1> @__svml_atan$4(<$3 x $1>) nounwind readnone alwaysinline {
|
||||
%ret = call <$3 x $1> @__svml_atan$2(<$3 x $1> %0)
|
||||
ret <$3 x $1> %ret
|
||||
}
|
||||
|
||||
define <$3 x $1> @__svml_atan2$4(<$3 x $1>, <$3 x $1>) nounwind readnone alwaysinline {
|
||||
%ret = call <$3 x $1> @__svml_atan2$2(<$3 x $1> %0, <$3 x $1> %1)
|
||||
ret <$3 x $1> %ret
|
||||
}
|
||||
|
||||
define <$3 x $1> @__svml_exp$4(<$3 x $1>) nounwind readnone alwaysinline {
|
||||
%ret = call <$3 x $1> @__svml_exp$2(<$3 x $1> %0)
|
||||
ret <$3 x $1> %ret
|
||||
}
|
||||
|
||||
define <$3 x $1> @__svml_log$4(<$3 x $1>) nounwind readnone alwaysinline {
|
||||
%ret = call <$3 x $1> @__svml_log$2(<$3 x $1> %0)
|
||||
ret <$3 x $1> %ret
|
||||
}
|
||||
|
||||
define <$3 x $1> @__svml_pow$4(<$3 x $1>, <$3 x $1>) nounwind readnone alwaysinline {
|
||||
%ret = call <$3 x $1> @__svml_pow$2(<$3 x $1> %0, <$3 x $1> %1)
|
||||
ret <$3 x $1> %ret
|
||||
}
|
||||
')
|
||||
|
||||
|
||||
;; svml_define_x : defintition of __svml_* internal functions operation on extended width
|
||||
;; $1 - type ("float" or "double")
|
||||
;; $2 - __svml_* intrinsic function suffix
|
||||
;; float: "f4"(sse) "f8"(avx) "f16"(avx512)
|
||||
;; double: "2"(sse) "4"(avx) "8"(avx512)
|
||||
;; $3 - vector width
|
||||
;; $4 - svml internal function suffix ("f" for float, "d" for double)
|
||||
;; $5 - extended width, must be at least twice the native vector width
|
||||
;; contigent on existing of unary$3to$5 and binary$3to$5 macros
|
||||
|
||||
;; *todo*: in sincos call use __svml_sincos[f][2,4,8,16] call, e.g.
|
||||
;;define void @__svml_sincosf(<8 x float>, <8 x float> *,
|
||||
;; <8 x float> *) nounwind alwaysinline {
|
||||
;; ; call svml_sincosf4 two times with the two 4-wide sub-vectors
|
||||
;; %a = shufflevector <8 x float> %0, <8 x float> undef,
|
||||
;; <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
;; %b = shufflevector <8 x float> %0, <8 x float> undef,
|
||||
;; <4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
;;
|
||||
;; %cospa = alloca <4 x float>
|
||||
;; %sa = call <4 x float> @__svml_sincosf4(<4 x float> * %cospa, <4 x float> %a)
|
||||
;;
|
||||
;; %cospb = alloca <4 x float>
|
||||
;; %sb = call <4 x float> @__svml_sincosf4(<4 x float> * %cospb, <4 x float> %b)
|
||||
;;
|
||||
;; %sin = shufflevector <4 x float> %sa, <4 x float> %sb,
|
||||
;; <8 x i32> <i32 0, i32 1, i32 2, i32 3,
|
||||
;; i32 4, i32 5, i32 6, i32 7>
|
||||
;; store <8 x float> %sin, <8 x float> * %1
|
||||
;;
|
||||
;; %cosa = load <4 x float> * %cospa
|
||||
;; %cosb = load <4 x float> * %cospb
|
||||
;; %cos = shufflevector <4 x float> %cosa, <4 x float> %cosb,
|
||||
;; <8 x i32> <i32 0, i32 1, i32 2, i32 3,
|
||||
;; i32 4, i32 5, i32 6, i32 7>
|
||||
;; store <8 x float> %cos, <8 x float> * %2
|
||||
;;
|
||||
;; ret void
|
||||
;;}
|
||||
define(`svml_define_x',`
|
||||
define <$5 x $1> @__svml_sin$4(<$5 x $1>) nounwind readnone alwaysinline {
|
||||
unary$3to$5(ret, $1, @__svml_sin$2, %0)
|
||||
ret <$5 x $1> %ret
|
||||
}
|
||||
define <$5 x $1> @__svml_asin$4(<$5 x $1>) nounwind readnone alwaysinline {
|
||||
unary$3to$5(ret, $1, @__svml_asin$2, %0)
|
||||
ret <$5 x $1> %ret
|
||||
}
|
||||
define <$5 x $1> @__svml_cos$4(<$5 x $1>) nounwind readnone alwaysinline {
|
||||
unary$3to$5(ret, $1, @__svml_cos$2, %0)
|
||||
ret <$5 x $1> %ret
|
||||
}
|
||||
define void @__svml_sincos$4(<$5 x $1>,<$5 x $1>*,<$5 x $1>*) nounwind alwaysinline
|
||||
{
|
||||
%s = call <$5 x $1> @__svml_sin$4(<$5 x $1> %0)
|
||||
%c = call <$5 x $1> @__svml_cos$4(<$5 x $1> %0)
|
||||
store <$5 x $1> %s, <$5 x $1> * %1
|
||||
store <$5 x $1> %c, <$5 x $1> * %2
|
||||
ret void
|
||||
}
|
||||
define <$5 x $1> @__svml_tan$4(<$5 x $1>) nounwind readnone alwaysinline {
|
||||
unary$3to$5(ret, $1, @__svml_tan$2, %0)
|
||||
ret <$5 x $1> %ret
|
||||
}
|
||||
define <$5 x $1> @__svml_atan$4(<$5 x $1>) nounwind readnone alwaysinline {
|
||||
unary$3to$5(ret, $1, @__svml_atan$2, %0)
|
||||
ret <$5 x $1> %ret
|
||||
}
|
||||
define <$5 x $1> @__svml_atan2$4(<$5 x $1>,<$5 x $1>) nounwind readnone alwaysinline {
|
||||
binary$3to$5(ret, $1, @__svml_atan2$2, %0, %1)
|
||||
ret <$5 x $1> %ret
|
||||
}
|
||||
define <$5 x $1> @__svml_exp$4(<$5 x $1>) nounwind readnone alwaysinline {
|
||||
unary$3to$5(ret, $1, @__svml_exp$2, %0)
|
||||
ret <$5 x $1> %ret
|
||||
}
|
||||
define <$5 x $1> @__svml_log$4(<$5 x $1>) nounwind readnone alwaysinline {
|
||||
unary$3to$5(ret, $1, @__svml_log$2, %0)
|
||||
ret <$5 x $1> %ret
|
||||
}
|
||||
define <$5 x $1> @__svml_pow$4(<$5 x $1>,<$5 x $1>) nounwind readnone alwaysinline {
|
||||
binary$3to$5(ret, $1, @__svml_pow$2, %0, %1)
|
||||
ret <$5 x $1> %ret
|
||||
}
|
||||
')
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -31,30 +31,16 @@
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; AVX target implementation.
|
||||
;;
|
||||
;; Please note that this file uses SSE intrinsics, but LLVM generates AVX
|
||||
;; instructions, so it doesn't makes sense to change this implemenation.
|
||||
|
||||
|
||||
ctlztz()
|
||||
define_prefetches()
|
||||
define_shuffles()
|
||||
aossoa()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
|
||||
|
||||
define float @__rcp_uniform_float(float) nounwind readonly alwaysinline {
|
||||
; uniform float iv = extract(__rcp_u(v), 0);
|
||||
; return iv * (2. - v * iv);
|
||||
%vecval = insertelement <4 x float> undef, float %0, i32 0
|
||||
%call = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %vecval)
|
||||
%scall = extractelement <4 x float> %call, i32 0
|
||||
|
||||
; do one N-R iteration
|
||||
%v_iv = fmul float %0, %scall
|
||||
%two_minus = fsub float 2., %v_iv
|
||||
%iv_mul = fmul float %scall, %two_minus
|
||||
ret float %iv_mul
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rounding floats
|
||||
@@ -77,7 +63,8 @@ define float @__round_uniform_float(float) nounwind readonly alwaysinline {
|
||||
; r3 = a3
|
||||
;
|
||||
; It doesn't matter what we pass as a, since we only need the r0 value
|
||||
; here. So we pass the same register for both.
|
||||
; here. So we pass the same register for both. Further, only the 0th
|
||||
; element of the b parameter matters
|
||||
%xi = insertelement <4 x float> undef, float %0, i32 0
|
||||
%xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 8)
|
||||
%rs = extractelement <4 x float> %xr, i32 0
|
||||
@@ -117,7 +104,7 @@ define double @__round_uniform_double(double) nounwind readonly alwaysinline {
|
||||
define double @__floor_uniform_double(double) nounwind readonly alwaysinline {
|
||||
; see above for round_ss instrinsic discussion...
|
||||
%xi = insertelement <2 x double> undef, double %0, i32 0
|
||||
; roundpd, round down 0b01 | don't signal precision exceptions 0b1001 = 9
|
||||
; roundsd, round down 0b01 | don't signal precision exceptions 0b1001 = 9
|
||||
%xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 9)
|
||||
%rs = extractelement <2 x double> %xr, i32 0
|
||||
ret double %rs
|
||||
@@ -126,12 +113,31 @@ define double @__floor_uniform_double(double) nounwind readonly alwaysinline {
|
||||
define double @__ceil_uniform_double(double) nounwind readonly alwaysinline {
|
||||
; see above for round_ss instrinsic discussion...
|
||||
%xi = insertelement <2 x double> undef, double %0, i32 0
|
||||
; roundpd, round up 0b10 | don't signal precision exceptions 0b1010 = 10
|
||||
; roundsd, round up 0b10 | don't signal precision exceptions 0b1010 = 10
|
||||
%xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 10)
|
||||
%rs = extractelement <2 x double> %xr, i32 0
|
||||
ret double %rs
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
|
||||
|
||||
define float @__rcp_uniform_float(float) nounwind readonly alwaysinline {
|
||||
; do the rcpss call
|
||||
; uniform float iv = extract(__rcp_u(v), 0);
|
||||
; return iv * (2. - v * iv);
|
||||
%vecval = insertelement <4 x float> undef, float %0, i32 0
|
||||
%call = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %vecval)
|
||||
%scall = extractelement <4 x float> %call, i32 0
|
||||
|
||||
; do one N-R iteration to improve precision, as above
|
||||
%v_iv = fmul float %0, %scall
|
||||
%two_minus = fsub float 2., %v_iv
|
||||
%iv_mul = fmul float %scall, %two_minus
|
||||
ret float %iv_mul
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rsqrt
|
||||
@@ -144,6 +150,7 @@ define float @__rsqrt_uniform_float(float) nounwind readonly alwaysinline {
|
||||
%vis = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %v)
|
||||
%is = extractelement <4 x float> %vis, i32 0
|
||||
|
||||
; Newton-Raphson iteration to improve precision
|
||||
; return 0.5 * is * (3. - (v * is) * is);
|
||||
%v_is = fmul float %0, %is
|
||||
%v_is_is = fmul float %v_is, %is
|
||||
@@ -164,9 +171,18 @@ define float @__sqrt_uniform_float(float) nounwind readonly alwaysinline {
|
||||
ret float %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
|
||||
|
||||
define double @__sqrt_uniform_double(double) nounwind alwaysinline {
|
||||
sse_unary_scalar(ret, 2, double, @llvm.x86.sse2.sqrt.sd, %0)
|
||||
ret double %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; fastmath
|
||||
;; fast math mode
|
||||
|
||||
declare void @llvm.x86.sse.stmxcsr(i8 *) nounwind
|
||||
declare void @llvm.x86.sse.ldmxcsr(i8 *) nounwind
|
||||
@@ -175,7 +191,7 @@ define void @__fastmath() nounwind alwaysinline {
|
||||
%ptr = alloca i32
|
||||
%ptr8 = bitcast i32 * %ptr to i8 *
|
||||
call void @llvm.x86.sse.stmxcsr(i8 * %ptr8)
|
||||
%oldval = load i32 *%ptr
|
||||
%oldval = load PTR_OP_ARGS(`i32 ') %ptr
|
||||
|
||||
; turn on DAZ (64)/FTZ (32768) -> 32832
|
||||
%update = or i32 %oldval, 32832
|
||||
@@ -187,33 +203,51 @@ define void @__fastmath() nounwind alwaysinline {
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float min/max
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define float @__max_uniform_float(float, float) nounwind readonly alwaysinline {
|
||||
sse_binary_scalar(ret, 4, float, @llvm.x86.sse.max.ss, %0, %1)
|
||||
%cmp = fcmp ogt float %1, %0
|
||||
%ret = select i1 %cmp, float %1, float %0
|
||||
ret float %ret
|
||||
}
|
||||
|
||||
define float @__min_uniform_float(float, float) nounwind readonly alwaysinline {
|
||||
sse_binary_scalar(ret, 4, float, @llvm.x86.sse.min.ss, %0, %1)
|
||||
%cmp = fcmp ogt float %1, %0
|
||||
%ret = select i1 %cmp, float %0, float %1
|
||||
ret float %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision min/max
|
||||
|
||||
define double @__min_uniform_double(double, double) nounwind readnone alwaysinline {
|
||||
%cmp = fcmp ogt double %1, %0
|
||||
%ret = select i1 %cmp, double %0, double %1
|
||||
ret double %ret
|
||||
}
|
||||
|
||||
define double @__max_uniform_double(double, double) nounwind readnone alwaysinline {
|
||||
%cmp = fcmp ogt double %1, %0
|
||||
%ret = select i1 %cmp, double %1, double %0
|
||||
ret double %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int min/max
|
||||
|
||||
declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
|
||||
define i32 @__min_uniform_int32(i32, i32) nounwind readonly alwaysinline {
|
||||
sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pminsd, %0, %1)
|
||||
%cmp = icmp sgt i32 %1, %0
|
||||
%ret = select i1 %cmp, i32 %0, i32 %1
|
||||
ret i32 %ret
|
||||
}
|
||||
|
||||
define i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline {
|
||||
sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pmaxsd, %0, %1)
|
||||
%cmp = icmp sgt i32 %1, %0
|
||||
%ret = select i1 %cmp, i32 %1, i32 %0
|
||||
ret i32 %ret
|
||||
}
|
||||
|
||||
@@ -221,21 +255,20 @@ define i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline {
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unsigned int min/max
|
||||
|
||||
declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
|
||||
define i32 @__min_uniform_uint32(i32, i32) nounwind readonly alwaysinline {
|
||||
sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pminud, %0, %1)
|
||||
%cmp = icmp ugt i32 %1, %0
|
||||
%ret = select i1 %cmp, i32 %0, i32 %1
|
||||
ret i32 %ret
|
||||
}
|
||||
|
||||
define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline {
|
||||
sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pmaxud, %0, %1)
|
||||
%cmp = icmp ugt i32 %1, %0
|
||||
%ret = select i1 %cmp, i32 %1, i32 %0
|
||||
ret i32 %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; horizontal ops
|
||||
;; horizontal ops / reductions
|
||||
|
||||
declare i32 @llvm.ctpop.i32(i32) nounwind readnone
|
||||
|
||||
@@ -251,29 +284,10 @@ define i64 @__popcnt_int64(i64) nounwind readonly alwaysinline {
|
||||
ret i64 %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
|
||||
declare <2 x double> @llvm.x86.sse.sqrt.sd(<2 x double>) nounwind readnone
|
||||
|
||||
define double @__sqrt_uniform_double(double) nounwind alwaysinline {
|
||||
sse_unary_scalar(ret, 2, double, @llvm.x86.sse.sqrt.sd, %0)
|
||||
ret double %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision min/max
|
||||
;; int8/int16 builtins
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
|
||||
declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
|
||||
define_avgs()
|
||||
declare_nvptx()
|
||||
|
||||
define double @__min_uniform_double(double, double) nounwind readnone alwaysinline {
|
||||
sse_binary_scalar(ret, 2, double, @llvm.x86.sse2.min.sd, %0, %1)
|
||||
ret double %ret
|
||||
}
|
||||
|
||||
define double @__max_uniform_double(double, double) nounwind readnone alwaysinline {
|
||||
sse_binary_scalar(ret, 2, double, @llvm.x86.sse2.max.sd, %0, %1)
|
||||
ret double %ret
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -40,6 +40,7 @@ stdlib_core()
|
||||
packed_load_and_store()
|
||||
scans()
|
||||
int64minmax()
|
||||
saturation_arithmetic()
|
||||
|
||||
include(`target-avx-common.ll')
|
||||
|
||||
@@ -137,19 +138,14 @@ define <16 x float> @__sqrt_varying_float(<16 x float>) nounwind readonly always
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; svml
|
||||
|
||||
; FIXME: need either to wire these up to the 8-wide SVML entrypoints,
|
||||
; or, use the macro to call the 4-wide ones 4x with our 16-wide
|
||||
; vectors...
|
||||
include(`svml.m4')
|
||||
;; single precision
|
||||
svml_declare(float,f8,8)
|
||||
svml_define_x(float,f8,8,f,16)
|
||||
|
||||
declare <16 x float> @__svml_sin(<16 x float>)
|
||||
declare <16 x float> @__svml_cos(<16 x float>)
|
||||
declare void @__svml_sincos(<16 x float>, <16 x float> *, <16 x float> *)
|
||||
declare <16 x float> @__svml_tan(<16 x float>)
|
||||
declare <16 x float> @__svml_atan(<16 x float>)
|
||||
declare <16 x float> @__svml_atan2(<16 x float>, <16 x float>)
|
||||
declare <16 x float> @__svml_exp(<16 x float>)
|
||||
declare <16 x float> @__svml_log(<16 x float>)
|
||||
declare <16 x float> @__svml_pow(<16 x float>, <16 x float>)
|
||||
;; double precision
|
||||
svml_declare(double,4,4)
|
||||
svml_define_x(double,4,4,d,16)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float min/max
|
||||
@@ -158,51 +154,24 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind
|
||||
declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind readnone
|
||||
|
||||
define <16 x float> @__max_varying_float(<16 x float>,
|
||||
<16 x float>) nounwind readonly alwaysinline {
|
||||
<16 x float>) nounwind readonly alwaysinline {
|
||||
binary8to16(call, float, @llvm.x86.avx.max.ps.256, %0, %1)
|
||||
ret <16 x float> %call
|
||||
}
|
||||
|
||||
define <16 x float> @__min_varying_float(<16 x float>,
|
||||
<16 x float>) nounwind readonly alwaysinline {
|
||||
<16 x float>) nounwind readonly alwaysinline {
|
||||
binary8to16(call, float, @llvm.x86.avx.min.ps.256, %0, %1)
|
||||
ret <16 x float> %call
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int min/max
|
||||
|
||||
define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pminsd, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pmaxsd, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unsigned int min/max
|
||||
|
||||
define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pminud, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pmaxud, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; horizontal ops
|
||||
|
||||
declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
|
||||
|
||||
define i32 @__movmsk(<16 x i32>) nounwind readnone alwaysinline {
|
||||
define i64 @__movmsk(<16 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <16 x i32> %0 to <16 x float>
|
||||
%mask0 = shufflevector <16 x float> %floatmask, <16 x float> undef,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
@@ -213,9 +182,57 @@ define i32 @__movmsk(<16 x i32>) nounwind readnone alwaysinline {
|
||||
|
||||
%v1shift = shl i32 %v1, 8
|
||||
%v = or i32 %v1shift, %v0
|
||||
ret i32 %v
|
||||
%v64 = zext i32 %v to i64
|
||||
ret i64 %v64
|
||||
}
|
||||
|
||||
define i1 @__any(<16 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <16 x i32> %0 to <16 x float>
|
||||
%mask0 = shufflevector <16 x float> %floatmask, <16 x float> undef,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
%v0 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask0) nounwind readnone
|
||||
%mask1 = shufflevector <16 x float> %floatmask, <16 x float> undef,
|
||||
<8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
%v1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask1) nounwind readnone
|
||||
|
||||
%v1shift = shl i32 %v1, 8
|
||||
%v = or i32 %v1shift, %v0
|
||||
%cmp = icmp ne i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__all(<16 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <16 x i32> %0 to <16 x float>
|
||||
%mask0 = shufflevector <16 x float> %floatmask, <16 x float> undef,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
%v0 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask0) nounwind readnone
|
||||
%mask1 = shufflevector <16 x float> %floatmask, <16 x float> undef,
|
||||
<8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
%v1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask1) nounwind readnone
|
||||
|
||||
%v1shift = shl i32 %v1, 8
|
||||
%v = or i32 %v1shift, %v0
|
||||
%cmp = icmp eq i32 %v, 65535
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__none(<16 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <16 x i32> %0 to <16 x float>
|
||||
%mask0 = shufflevector <16 x float> %floatmask, <16 x float> undef,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
%v0 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask0) nounwind readnone
|
||||
%mask1 = shufflevector <16 x float> %floatmask, <16 x float> undef,
|
||||
<8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
%v1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask1) nounwind readnone
|
||||
|
||||
%v1shift = shl i32 %v1, 8
|
||||
%v = or i32 %v1shift, %v0
|
||||
%cmp = icmp eq i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal float ops
|
||||
|
||||
@@ -250,8 +267,35 @@ reduce_equal(16)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int32 ops
|
||||
|
||||
declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<16 x i8>) nounwind readnone alwaysinline {
|
||||
%rv = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %0,
|
||||
<16 x i8> zeroinitializer)
|
||||
%r0 = extractelement <2 x i64> %rv, i32 0
|
||||
%r1 = extractelement <2 x i64> %rv, i32 1
|
||||
%r = add i64 %r0, %r1
|
||||
%r16 = trunc i64 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
define internal <16 x i16> @__add_varying_i16(<16 x i16>,
|
||||
<16 x i16>) nounwind readnone alwaysinline {
|
||||
%r = add <16 x i16> %0, %1
|
||||
ret <16 x i16> %r
|
||||
}
|
||||
|
||||
define internal i16 @__add_uniform_i16(i16, i16) nounwind readnone alwaysinline {
|
||||
%r = add i16 %0, %1
|
||||
ret i16 %r
|
||||
}
|
||||
|
||||
define i16 @__reduce_add_int16(<16 x i16>) nounwind readnone alwaysinline {
|
||||
reduce16(i16, @__add_varying_i16, @__add_uniform_i16)
|
||||
}
|
||||
|
||||
define <16 x i32> @__add_varying_int32(<16 x i32>,
|
||||
<16 x i32>) nounwind readnone alwaysinline {
|
||||
<16 x i32>) nounwind readnone alwaysinline {
|
||||
%s = add <16 x i32> %0, %1
|
||||
ret <16 x i32> %s
|
||||
}
|
||||
@@ -279,11 +323,6 @@ define i32 @__reduce_max_int32(<16 x i32>) nounwind readnone alwaysinline {
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;;; horizontal uint32 ops
|
||||
|
||||
define i32 @__reduce_add_uint32(<16 x i32> %v) nounwind readnone alwaysinline {
|
||||
%r = call i32 @__reduce_add_int32(<16 x i32> %v)
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<16 x i32>) nounwind readnone alwaysinline {
|
||||
reduce16(i32, @__min_varying_uint32, @__min_uniform_uint32)
|
||||
}
|
||||
@@ -361,11 +400,6 @@ define i64 @__reduce_max_int64(<16 x i64>) nounwind readnone alwaysinline {
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;;; horizontal uint64 ops
|
||||
|
||||
define i64 @__reduce_add_uint64(<16 x i64> %v) nounwind readnone alwaysinline {
|
||||
%r = call i64 @__reduce_add_int64(<16 x i64> %v)
|
||||
ret i64 %r
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_uint64(<16 x i64>) nounwind readnone alwaysinline {
|
||||
reduce16(i64, @__min_varying_uint64, @__min_uniform_uint64)
|
||||
}
|
||||
@@ -379,27 +413,22 @@ define i64 @__reduce_max_uint64(<16 x i64>) nounwind readnone alwaysinline {
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
load_and_broadcast(16, i8, 8)
|
||||
load_and_broadcast(16, i16, 16)
|
||||
load_and_broadcast(16, i32, 32)
|
||||
load_and_broadcast(16, i64, 64)
|
||||
|
||||
; no masked load instruction for i8 and i16 types??
|
||||
masked_load(16, i8, 8, 1)
|
||||
masked_load(16, i16, 16, 2)
|
||||
masked_load(i8, 1)
|
||||
masked_load(i16, 2)
|
||||
|
||||
declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8 *, <8 x float> %mask)
|
||||
declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8 *, <4 x double> %mask)
|
||||
declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8 *, <8 x MfORi32> %mask)
|
||||
declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8 *, <4 x MdORi64> %mask)
|
||||
|
||||
define <16 x i32> @__masked_load_32(i8 *, <16 x i32> %mask) nounwind alwaysinline {
|
||||
%floatmask = bitcast <16 x i32> %mask to <16 x float>
|
||||
%mask0 = shufflevector <16 x float> %floatmask, <16 x float> undef,
|
||||
define <16 x i32> @__masked_load_i32(i8 *, <16 x i32> %mask) nounwind alwaysinline {
|
||||
%floatmask = bitcast <16 x i32> %mask to <16 x MfORi32>
|
||||
%mask0 = shufflevector <16 x MfORi32> %floatmask, <16 x MfORi32> undef,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
%val0 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8 * %0, <8 x float> %mask0)
|
||||
%mask1 = shufflevector <16 x float> %floatmask, <16 x float> undef,
|
||||
%val0 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8 * %0, <8 x MfORi32> %mask0)
|
||||
%mask1 = shufflevector <16 x MfORi32> %floatmask, <16 x MfORi32> undef,
|
||||
<8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
%ptr1 = getelementptr i8 * %0, i32 32 ;; 8x4 bytes = 32
|
||||
%val1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8 * %ptr1, <8 x float> %mask1)
|
||||
%ptr1 = getelementptr PTR_OP_ARGS(`i8') %0, i32 32 ;; 8x4 bytes = 32
|
||||
%val1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8 * %ptr1, <8 x MfORi32> %mask1)
|
||||
|
||||
%retval = shufflevector <8 x float> %val0, <8 x float> %val1,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
|
||||
@@ -409,7 +438,7 @@ define <16 x i32> @__masked_load_32(i8 *, <16 x i32> %mask) nounwind alwaysinlin
|
||||
}
|
||||
|
||||
|
||||
define <16 x i64> @__masked_load_64(i8 *, <16 x i32> %mask) nounwind alwaysinline {
|
||||
define <16 x i64> @__masked_load_i64(i8 *, <16 x i32> %mask) nounwind alwaysinline {
|
||||
; double up masks, bitcast to doubles
|
||||
%mask0 = shufflevector <16 x i32> %mask, <16 x i32> undef,
|
||||
<8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
|
||||
@@ -419,18 +448,18 @@ define <16 x i64> @__masked_load_64(i8 *, <16 x i32> %mask) nounwind alwaysinlin
|
||||
<8 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11>
|
||||
%mask3 = shufflevector <16 x i32> %mask, <16 x i32> undef,
|
||||
<8 x i32> <i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
|
||||
%mask0d = bitcast <8 x i32> %mask0 to <4 x double>
|
||||
%mask1d = bitcast <8 x i32> %mask1 to <4 x double>
|
||||
%mask2d = bitcast <8 x i32> %mask2 to <4 x double>
|
||||
%mask3d = bitcast <8 x i32> %mask3 to <4 x double>
|
||||
%mask0d = bitcast <8 x i32> %mask0 to <4 x MdORi64>
|
||||
%mask1d = bitcast <8 x i32> %mask1 to <4 x MdORi64>
|
||||
%mask2d = bitcast <8 x i32> %mask2 to <4 x MdORi64>
|
||||
%mask3d = bitcast <8 x i32> %mask3 to <4 x MdORi64>
|
||||
|
||||
%val0d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %0, <4 x double> %mask0d)
|
||||
%ptr1 = getelementptr i8 * %0, i32 32
|
||||
%val1d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr1, <4 x double> %mask1d)
|
||||
%ptr2 = getelementptr i8 * %0, i32 64
|
||||
%val2d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr2, <4 x double> %mask2d)
|
||||
%ptr3 = getelementptr i8 * %0, i32 96
|
||||
%val3d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr3, <4 x double> %mask3d)
|
||||
%val0d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %0, <4 x MdORi64> %mask0d)
|
||||
%ptr1 = getelementptr PTR_OP_ARGS(`i8') %0, i32 32
|
||||
%val1d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr1, <4 x MdORi64> %mask1d)
|
||||
%ptr2 = getelementptr PTR_OP_ARGS(`i8') %0, i32 64
|
||||
%val2d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr2, <4 x MdORi64> %mask2d)
|
||||
%ptr3 = getelementptr PTR_OP_ARGS(`i8') %0, i32 96
|
||||
%val3d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr3, <4 x MdORi64> %mask3d)
|
||||
|
||||
%val01 = shufflevector <4 x double> %val0d, <4 x double> %val1d,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
@@ -443,6 +472,7 @@ define <16 x i64> @__masked_load_64(i8 *, <16 x i32> %mask) nounwind alwaysinlin
|
||||
ret <16 x i64> %val
|
||||
}
|
||||
|
||||
masked_load_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store
|
||||
@@ -450,38 +480,38 @@ define <16 x i64> @__masked_load_64(i8 *, <16 x i32> %mask) nounwind alwaysinlin
|
||||
; FIXME: there is no AVX instruction for these, but we could be clever
|
||||
; by packing the bits down and setting the last 3/4 or half, respectively,
|
||||
; of the mask to zero... Not sure if this would be a win in the end
|
||||
gen_masked_store(16, i8, 8)
|
||||
gen_masked_store(16, i16, 16)
|
||||
gen_masked_store(i8)
|
||||
gen_masked_store(i16)
|
||||
|
||||
; note that mask is the 2nd parameter, not the 3rd one!!
|
||||
declare void @llvm.x86.avx.maskstore.ps.256(i8 *, <8 x float>, <8 x float>)
|
||||
declare void @llvm.x86.avx.maskstore.pd.256(i8 *, <4 x double>, <4 x double>)
|
||||
declare void @llvm.x86.avx.maskstore.ps.256(i8 *, <8 x MfORi32>, <8 x float>)
|
||||
declare void @llvm.x86.avx.maskstore.pd.256(i8 *, <4 x MdORi64>, <4 x double>)
|
||||
|
||||
define void @__masked_store_32(<16 x i32>* nocapture, <16 x i32>,
|
||||
<16 x i32>) nounwind alwaysinline {
|
||||
define void @__masked_store_i32(<16 x i32>* nocapture, <16 x i32>,
|
||||
<16 x i32>) nounwind alwaysinline {
|
||||
%ptr = bitcast <16 x i32> * %0 to i8 *
|
||||
%val = bitcast <16 x i32> %1 to <16 x float>
|
||||
%mask = bitcast <16 x i32> %2 to <16 x float>
|
||||
%mask = bitcast <16 x i32> %2 to <16 x MfORi32>
|
||||
|
||||
%val0 = shufflevector <16 x float> %val, <16 x float> undef,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
%val1 = shufflevector <16 x float> %val, <16 x float> undef,
|
||||
<8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
|
||||
%mask0 = shufflevector <16 x float> %mask, <16 x float> undef,
|
||||
%mask0 = shufflevector <16 x MfORi32> %mask, <16 x MfORi32> undef,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
%mask1 = shufflevector <16 x float> %mask, <16 x float> undef,
|
||||
%mask1 = shufflevector <16 x MfORi32> %mask, <16 x MfORi32> undef,
|
||||
<8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
|
||||
call void @llvm.x86.avx.maskstore.ps.256(i8 * %ptr, <8 x float> %mask0, <8 x float> %val0)
|
||||
%ptr1 = getelementptr i8 * %ptr, i32 32
|
||||
call void @llvm.x86.avx.maskstore.ps.256(i8 * %ptr1, <8 x float> %mask1, <8 x float> %val1)
|
||||
call void @llvm.x86.avx.maskstore.ps.256(i8 * %ptr, <8 x MfORi32> %mask0, <8 x float> %val0)
|
||||
%ptr1 = getelementptr PTR_OP_ARGS(`i8') %ptr, i32 32
|
||||
call void @llvm.x86.avx.maskstore.ps.256(i8 * %ptr1, <8 x MfORi32> %mask1, <8 x float> %val1)
|
||||
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_64(<16 x i64>* nocapture, <16 x i64>,
|
||||
<16 x i32> %mask) nounwind alwaysinline {
|
||||
define void @__masked_store_i64(<16 x i64>* nocapture, <16 x i64>,
|
||||
<16 x i32> %mask) nounwind alwaysinline {
|
||||
%ptr = bitcast <16 x i64> * %0 to i8 *
|
||||
%val = bitcast <16 x i64> %1 to <16 x double>
|
||||
|
||||
@@ -494,10 +524,10 @@ define void @__masked_store_64(<16 x i64>* nocapture, <16 x i64>,
|
||||
<8 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11>
|
||||
%mask3 = shufflevector <16 x i32> %mask, <16 x i32> undef,
|
||||
<8 x i32> <i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
|
||||
%mask0d = bitcast <8 x i32> %mask0 to <4 x double>
|
||||
%mask1d = bitcast <8 x i32> %mask1 to <4 x double>
|
||||
%mask2d = bitcast <8 x i32> %mask2 to <4 x double>
|
||||
%mask3d = bitcast <8 x i32> %mask3 to <4 x double>
|
||||
%mask0d = bitcast <8 x i32> %mask0 to <4 x MdORi64>
|
||||
%mask1d = bitcast <8 x i32> %mask1 to <4 x MdORi64>
|
||||
%mask2d = bitcast <8 x i32> %mask2 to <4 x MdORi64>
|
||||
%mask3d = bitcast <8 x i32> %mask3 to <4 x MdORi64>
|
||||
|
||||
%val0 = shufflevector <16 x double> %val, <16 x double> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
@@ -508,27 +538,28 @@ define void @__masked_store_64(<16 x i64>* nocapture, <16 x i64>,
|
||||
%val3 = shufflevector <16 x double> %val, <16 x double> undef,
|
||||
<4 x i32> <i32 12, i32 13, i32 14, i32 15>
|
||||
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr, <4 x double> %mask0d, <4 x double> %val0)
|
||||
%ptr1 = getelementptr i8 * %ptr, i32 32
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr1, <4 x double> %mask1d, <4 x double> %val1)
|
||||
%ptr2 = getelementptr i8 * %ptr, i32 64
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr2, <4 x double> %mask2d, <4 x double> %val2)
|
||||
%ptr3 = getelementptr i8 * %ptr, i32 96
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr3, <4 x double> %mask3d, <4 x double> %val3)
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr, <4 x MdORi64> %mask0d, <4 x double> %val0)
|
||||
%ptr1 = getelementptr PTR_OP_ARGS(`i8') %ptr, i32 32
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr1, <4 x MdORi64> %mask1d, <4 x double> %val1)
|
||||
%ptr2 = getelementptr PTR_OP_ARGS(`i8') %ptr, i32 64
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr2, <4 x MdORi64> %mask2d, <4 x double> %val2)
|
||||
%ptr3 = getelementptr PTR_OP_ARGS(`i8') %ptr, i32 96
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr3, <4 x MdORi64> %mask3d, <4 x double> %val3)
|
||||
|
||||
ret void
|
||||
}
|
||||
|
||||
masked_store_float_double()
|
||||
|
||||
masked_store_blend_8_16_by_16()
|
||||
|
||||
declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>,
|
||||
<8 x float>) nounwind readnone
|
||||
|
||||
define void @__masked_store_blend_32(<16 x i32>* nocapture, <16 x i32>,
|
||||
<16 x i32>) nounwind alwaysinline {
|
||||
define void @__masked_store_blend_i32(<16 x i32>* nocapture, <16 x i32>,
|
||||
<16 x i32>) nounwind alwaysinline {
|
||||
%maskAsFloat = bitcast <16 x i32> %2 to <16 x float>
|
||||
%oldValue = load <16 x i32>* %0, align 4
|
||||
%oldValue = load PTR_OP_ARGS(`<16 x i32>') %0, align 4
|
||||
%oldAsFloat = bitcast <16 x i32> %oldValue to <16 x float>
|
||||
%newAsFloat = bitcast <16 x i32> %1 to <16 x float>
|
||||
|
||||
@@ -563,9 +594,9 @@ define void @__masked_store_blend_32(<16 x i32>* nocapture, <16 x i32>,
|
||||
declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>,
|
||||
<4 x double>) nounwind readnone
|
||||
|
||||
define void @__masked_store_blend_64(<16 x i64>* nocapture %ptr, <16 x i64> %newi64,
|
||||
<16 x i32> %mask) nounwind alwaysinline {
|
||||
%oldValue = load <16 x i64>* %ptr, align 8
|
||||
define void @__masked_store_blend_i64(<16 x i64>* nocapture %ptr, <16 x i64> %newi64,
|
||||
<16 x i32> %mask) nounwind alwaysinline {
|
||||
%oldValue = load PTR_OP_ARGS(`<16 x i64>') %ptr, align 8
|
||||
%old = bitcast <16 x i64> %oldValue to <16 x double>
|
||||
%old0d = shufflevector <16 x double> %old, <16 x double> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
@@ -622,17 +653,14 @@ define void @__masked_store_blend_64(<16 x i64>* nocapture %ptr, <16 x i64> %new
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather/scatter
|
||||
;; scatter
|
||||
|
||||
gen_gather(16, i8)
|
||||
gen_gather(16, i16)
|
||||
gen_gather(16, i32)
|
||||
gen_gather(16, i64)
|
||||
|
||||
gen_scatter(16, i8)
|
||||
gen_scatter(16, i16)
|
||||
gen_scatter(16, i32)
|
||||
gen_scatter(16, i64)
|
||||
gen_scatter(i8)
|
||||
gen_scatter(i16)
|
||||
gen_scatter(i32)
|
||||
gen_scatter(float)
|
||||
gen_scatter(i64)
|
||||
gen_scatter(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
@@ -660,3 +688,12 @@ define <16 x double> @__max_varying_double(<16 x double>, <16 x double>) nounwin
|
||||
binary4to16(ret, double, @llvm.x86.avx.max.pd.256, %0, %1)
|
||||
ret <16 x double> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -49,11 +49,10 @@ include(`target-avx-common.ll')
|
||||
declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone
|
||||
|
||||
define <8 x float> @__rcp_varying_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
; do one N-R iteration to improve precision
|
||||
; float iv = __rcp_v(v);
|
||||
; return iv * (2. - v * iv);
|
||||
|
||||
%call = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %0)
|
||||
; do one N-R iteration
|
||||
%v_iv = fmul <8 x float> %0, %call
|
||||
%two_minus = fsub <8 x float> <float 2., float 2., float 2., float 2.,
|
||||
float 2., float 2., float 2., float 2.>, %v_iv
|
||||
@@ -61,6 +60,46 @@ define <8 x float> @__rcp_varying_float(<8 x float>) nounwind readonly alwaysinl
|
||||
ret <8 x float> %iv_mul
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rsqrt
|
||||
|
||||
declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone
|
||||
|
||||
define <8 x float> @__rsqrt_varying_float(<8 x float> %v) nounwind readonly alwaysinline {
|
||||
; float is = __rsqrt_v(v);
|
||||
%is = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %v)
|
||||
; Newton-Raphson iteration to improve precision
|
||||
; return 0.5 * is * (3. - (v * is) * is);
|
||||
%v_is = fmul <8 x float> %v, %is
|
||||
%v_is_is = fmul <8 x float> %v_is, %is
|
||||
%three_sub = fsub <8 x float> <float 3., float 3., float 3., float 3.,
|
||||
float 3., float 3., float 3., float 3.>, %v_is_is
|
||||
%is_mul = fmul <8 x float> %is, %three_sub
|
||||
%half_scale = fmul <8 x float> <float 0.5, float 0.5, float 0.5, float 0.5,
|
||||
float 0.5, float 0.5, float 0.5, float 0.5>, %is_mul
|
||||
ret <8 x float> %half_scale
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; sqrt
|
||||
|
||||
declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
|
||||
|
||||
define <8 x float> @__sqrt_varying_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %0)
|
||||
ret <8 x float> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
|
||||
declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone
|
||||
|
||||
define <8 x double> @__sqrt_varying_double(<8 x double>) nounwind alwaysinline {
|
||||
unary4to8(ret, double, @llvm.x86.avx.sqrt.pd.256, %0)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rounding floats
|
||||
|
||||
@@ -94,63 +133,15 @@ define <8 x double> @__round_varying_double(<8 x double>) nounwind readonly alwa
|
||||
}
|
||||
|
||||
define <8 x double> @__floor_varying_double(<8 x double>) nounwind readonly alwaysinline {
|
||||
; roundpd, round down 0b01 | don't signal precision exceptions 0b1000 = 9
|
||||
; roundpd, round down 0b01 | don't signal precision exceptions 0b1001 = 9
|
||||
round4to8double(%0, 9)
|
||||
}
|
||||
|
||||
|
||||
define <8 x double> @__ceil_varying_double(<8 x double>) nounwind readonly alwaysinline {
|
||||
; roundpd, round up 0b10 | don't signal precision exceptions 0b1000 = 10
|
||||
; roundpd, round up 0b10 | don't signal precision exceptions 0b1010 = 10
|
||||
round4to8double(%0, 10)
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rsqrt
|
||||
|
||||
declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone
|
||||
|
||||
define <8 x float> @__rsqrt_varying_float(<8 x float> %v) nounwind readonly alwaysinline {
|
||||
; float is = __rsqrt_v(v);
|
||||
%is = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %v)
|
||||
; return 0.5 * is * (3. - (v * is) * is);
|
||||
%v_is = fmul <8 x float> %v, %is
|
||||
%v_is_is = fmul <8 x float> %v_is, %is
|
||||
%three_sub = fsub <8 x float> <float 3., float 3., float 3., float 3.,
|
||||
float 3., float 3., float 3., float 3.>, %v_is_is
|
||||
%is_mul = fmul <8 x float> %is, %three_sub
|
||||
%half_scale = fmul <8 x float> <float 0.5, float 0.5, float 0.5, float 0.5,
|
||||
float 0.5, float 0.5, float 0.5, float 0.5>, %is_mul
|
||||
ret <8 x float> %half_scale
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; sqrt
|
||||
|
||||
declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
|
||||
|
||||
define <8 x float> @__sqrt_varying_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %0)
|
||||
ret <8 x float> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; svml
|
||||
|
||||
; FIXME: need either to wire these up to the 8-wide SVML entrypoints,
|
||||
; or, use the macro to call the 4-wide ones twice with our 8-wide
|
||||
; vectors...
|
||||
|
||||
declare <8 x float> @__svml_sin(<8 x float>)
|
||||
declare <8 x float> @__svml_cos(<8 x float>)
|
||||
declare void @__svml_sincos(<8 x float>, <8 x float> *, <8 x float> *)
|
||||
declare <8 x float> @__svml_tan(<8 x float>)
|
||||
declare <8 x float> @__svml_atan(<8 x float>)
|
||||
declare <8 x float> @__svml_atan2(<8 x float>, <8 x float>)
|
||||
declare <8 x float> @__svml_exp(<8 x float>)
|
||||
declare <8 x float> @__svml_log(<8 x float>)
|
||||
declare <8 x float> @__svml_pow(<8 x float>, <8 x float>)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float min/max
|
||||
|
||||
@@ -158,56 +149,84 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind
|
||||
declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind readnone
|
||||
|
||||
define <8 x float> @__max_varying_float(<8 x float>,
|
||||
<8 x float>) nounwind readonly alwaysinline {
|
||||
<8 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %0, <8 x float> %1)
|
||||
ret <8 x float> %call
|
||||
}
|
||||
|
||||
define <8 x float> @__min_varying_float(<8 x float>,
|
||||
<8 x float>) nounwind readonly alwaysinline {
|
||||
<8 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %0, <8 x float> %1)
|
||||
ret <8 x float> %call
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int min/max
|
||||
;; double precision min/max
|
||||
|
||||
define <8 x i32> @__min_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pminsd, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwind readnone
|
||||
declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwind readnone
|
||||
|
||||
define <8 x double> @__min_varying_double(<8 x double>, <8 x double>) nounwind readnone alwaysinline {
|
||||
binary4to8(ret, double, @llvm.x86.avx.min.pd.256, %0, %1)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
|
||||
define <8 x i32> @__max_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pmaxsd, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
define <8 x double> @__max_varying_double(<8 x double>, <8 x double>) nounwind readnone alwaysinline {
|
||||
binary4to8(ret, double, @llvm.x86.avx.max.pd.256, %0, %1)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unsigned int min/max
|
||||
;; svml
|
||||
|
||||
define <8 x i32> @__min_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pminud, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
}
|
||||
include(`svml.m4')
|
||||
;; single precision
|
||||
svml_declare(float,f8,8)
|
||||
svml_define(float,f8,8,f)
|
||||
|
||||
;; double precision
|
||||
svml_declare(double,4,4)
|
||||
svml_define_x(double,4,4,d,8)
|
||||
|
||||
define <8 x i32> @__max_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pmaxud, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; horizontal ops
|
||||
;; mask handling
|
||||
|
||||
declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
|
||||
|
||||
define i32 @__movmsk(<8 x i32>) nounwind readnone alwaysinline {
|
||||
define i64 @__movmsk(<8 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%v = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %floatmask) nounwind readnone
|
||||
ret i32 %v
|
||||
%v64 = zext i32 %v to i64
|
||||
ret i64 %v64
|
||||
}
|
||||
|
||||
define i1 @__any(<8 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%v = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %floatmask) nounwind readnone
|
||||
%cmp = icmp ne i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__all(<8 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%v = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %floatmask) nounwind readnone
|
||||
%cmp = icmp eq i32 %v, 255
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__none(<8 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%v = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %floatmask) nounwind readnone
|
||||
%cmp = icmp eq i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal ops / reductions
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal float ops
|
||||
|
||||
@@ -222,65 +241,14 @@ define float @__reduce_add_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
ret float %sum
|
||||
}
|
||||
|
||||
|
||||
define float @__reduce_min_float(<8 x float>) nounwind readnone alwaysinline {
|
||||
reduce8(float, @__min_varying_float, @__min_uniform_float)
|
||||
}
|
||||
|
||||
|
||||
define float @__reduce_max_float(<8 x float>) nounwind readnone alwaysinline {
|
||||
reduce8(float, @__max_varying_float, @__max_uniform_float)
|
||||
}
|
||||
|
||||
reduce_equal(8)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int32 ops
|
||||
|
||||
define <8 x i32> @__add_varying_int32(<8 x i32>,
|
||||
<8 x i32>) nounwind readnone alwaysinline {
|
||||
%s = add <8 x i32> %0, %1
|
||||
ret <8 x i32> %s
|
||||
}
|
||||
|
||||
define i32 @__add_uniform_int32(i32, i32) nounwind readnone alwaysinline {
|
||||
%s = add i32 %0, %1
|
||||
ret i32 %s
|
||||
}
|
||||
|
||||
define i32 @__reduce_add_int32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__add_varying_int32, @__add_uniform_int32)
|
||||
}
|
||||
|
||||
|
||||
define i32 @__reduce_min_int32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__min_varying_int32, @__min_uniform_int32)
|
||||
}
|
||||
|
||||
|
||||
define i32 @__reduce_max_int32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__max_varying_int32, @__max_uniform_int32)
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;;; horizontal uint32 ops
|
||||
|
||||
define i32 @__reduce_add_uint32(<8 x i32> %v) nounwind readnone alwaysinline {
|
||||
%r = call i32 @__reduce_add_int32(<8 x i32> %v)
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__min_varying_uint32, @__min_uniform_uint32)
|
||||
}
|
||||
|
||||
|
||||
define i32 @__reduce_max_uint32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__max_varying_uint32, @__max_uniform_uint32)
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal double ops
|
||||
|
||||
@@ -304,17 +272,89 @@ define double @__reduce_min_double(<8 x double>) nounwind readnone alwaysinline
|
||||
reduce8(double, @__min_varying_double, @__min_uniform_double)
|
||||
}
|
||||
|
||||
|
||||
define double @__reduce_max_double(<8 x double>) nounwind readnone alwaysinline {
|
||||
reduce8(double, @__max_varying_double, @__max_uniform_double)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int8 ops
|
||||
|
||||
declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<8 x i8>) nounwind readnone alwaysinline {
|
||||
%wide8 = shufflevector <8 x i8> %0, <8 x i8> zeroinitializer,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
|
||||
i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
|
||||
%rv = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %wide8,
|
||||
<16 x i8> zeroinitializer)
|
||||
%r0 = extractelement <2 x i64> %rv, i32 0
|
||||
%r1 = extractelement <2 x i64> %rv, i32 1
|
||||
%r = add i64 %r0, %r1
|
||||
%r16 = trunc i64 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int16 ops
|
||||
|
||||
define internal <8 x i16> @__add_varying_i16(<8 x i16>,
|
||||
<8 x i16>) nounwind readnone alwaysinline {
|
||||
%r = add <8 x i16> %0, %1
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
define internal i16 @__add_uniform_i16(i16, i16) nounwind readnone alwaysinline {
|
||||
%r = add i16 %0, %1
|
||||
ret i16 %r
|
||||
}
|
||||
|
||||
define i16 @__reduce_add_int16(<8 x i16>) nounwind readnone alwaysinline {
|
||||
reduce8(i16, @__add_varying_i16, @__add_uniform_i16)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int32 ops
|
||||
|
||||
;; helper functions
|
||||
define <8 x i32> @__add_varying_int32(<8 x i32>,
|
||||
<8 x i32>) nounwind readnone alwaysinline {
|
||||
%s = add <8 x i32> %0, %1
|
||||
ret <8 x i32> %s
|
||||
}
|
||||
|
||||
define i32 @__add_uniform_int32(i32, i32) nounwind readnone alwaysinline {
|
||||
%s = add i32 %0, %1
|
||||
ret i32 %s
|
||||
}
|
||||
|
||||
;; reduction functions
|
||||
define i32 @__reduce_add_int32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__add_varying_int32, @__add_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_int32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__min_varying_int32, @__min_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_int32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__max_varying_int32, @__max_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__min_varying_uint32, @__min_uniform_uint32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_uint32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__max_varying_uint32, @__max_uniform_uint32)
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int64 ops
|
||||
|
||||
;; helper functions
|
||||
define <8 x i64> @__add_varying_int64(<8 x i64>,
|
||||
<8 x i64>) nounwind readnone alwaysinline {
|
||||
<8 x i64>) nounwind readnone alwaysinline {
|
||||
%s = add <8 x i64> %0, %1
|
||||
ret <8 x i64> %s
|
||||
}
|
||||
@@ -324,6 +364,7 @@ define i64 @__add_uniform_int64(i64, i64) nounwind readnone alwaysinline {
|
||||
ret i64 %s
|
||||
}
|
||||
|
||||
;; reduction functions
|
||||
define i64 @__reduce_add_int64(<8 x i64>) nounwind readnone alwaysinline {
|
||||
reduce8(i64, @__add_varying_int64, @__add_uniform_int64)
|
||||
}
|
||||
@@ -339,14 +380,6 @@ define i64 @__reduce_max_int64(<8 x i64>) nounwind readnone alwaysinline {
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;;; horizontal uint64 ops
|
||||
|
||||
define i64 @__reduce_add_uint64(<8 x i64> %v) nounwind readnone alwaysinline {
|
||||
%r = call i64 @__reduce_add_int64(<8 x i64> %v)
|
||||
ret i64 %r
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_uint64(<8 x i64>) nounwind readnone alwaysinline {
|
||||
reduce8(i64, @__min_varying_uint64, @__min_uniform_uint64)
|
||||
}
|
||||
@@ -356,42 +389,39 @@ define i64 @__reduce_max_uint64(<8 x i64>) nounwind readnone alwaysinline {
|
||||
reduce8(i64, @__max_varying_uint64, @__max_uniform_uint64)
|
||||
}
|
||||
|
||||
reduce_equal(8)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
load_and_broadcast(8, i8, 8)
|
||||
load_and_broadcast(8, i16, 16)
|
||||
load_and_broadcast(8, i32, 32)
|
||||
load_and_broadcast(8, i64, 64)
|
||||
|
||||
; no masked load instruction for i8 and i16 types??
|
||||
masked_load(8, i8, 8, 1)
|
||||
masked_load(8, i16, 16, 2)
|
||||
masked_load(i8, 1)
|
||||
masked_load(i16, 2)
|
||||
|
||||
declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8 *, <8 x float> %mask)
|
||||
declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8 *, <4 x double> %mask)
|
||||
declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8 *, <8 x MfORi32> %mask)
|
||||
declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8 *, <4 x MdORi64> %mask)
|
||||
|
||||
define <8 x i32> @__masked_load_32(i8 *, <8 x i32> %mask) nounwind alwaysinline {
|
||||
%floatmask = bitcast <8 x i32> %mask to <8 x float>
|
||||
%floatval = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8 * %0, <8 x float> %floatmask)
|
||||
define <8 x i32> @__masked_load_i32(i8 *, <8 x i32> %mask) nounwind alwaysinline {
|
||||
%floatmask = bitcast <8 x i32> %mask to <8 x MfORi32>
|
||||
%floatval = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8 * %0, <8 x MfORi32> %floatmask)
|
||||
%retval = bitcast <8 x float> %floatval to <8 x i32>
|
||||
ret <8 x i32> %retval
|
||||
}
|
||||
|
||||
|
||||
define <8 x i64> @__masked_load_64(i8 *, <8 x i32> %mask) nounwind alwaysinline {
|
||||
define <8 x i64> @__masked_load_i64(i8 *, <8 x i32> %mask) nounwind alwaysinline {
|
||||
; double up masks, bitcast to doubles
|
||||
%mask0 = shufflevector <8 x i32> %mask, <8 x i32> undef,
|
||||
<8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
|
||||
%mask1 = shufflevector <8 x i32> %mask, <8 x i32> undef,
|
||||
<8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
|
||||
%mask0d = bitcast <8 x i32> %mask0 to <4 x double>
|
||||
%mask1d = bitcast <8 x i32> %mask1 to <4 x double>
|
||||
%mask0d = bitcast <8 x i32> %mask0 to <4 x MdORi64>
|
||||
%mask1d = bitcast <8 x i32> %mask1 to <4 x MdORi64>
|
||||
|
||||
%val0d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %0, <4 x double> %mask0d)
|
||||
%ptr1 = getelementptr i8 * %0, i32 32
|
||||
%val1d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr1, <4 x double> %mask1d)
|
||||
%val0d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %0, <4 x MdORi64> %mask0d)
|
||||
%ptr1 = getelementptr PTR_OP_ARGS(`i8') %0, i32 32
|
||||
%val1d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr1, <4 x MdORi64> %mask1d)
|
||||
|
||||
%vald = shufflevector <4 x double> %val0d, <4 x double> %val1d,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
@@ -399,31 +429,29 @@ define <8 x i64> @__masked_load_64(i8 *, <8 x i32> %mask) nounwind alwaysinline
|
||||
ret <8 x i64> %val
|
||||
}
|
||||
|
||||
masked_load_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store
|
||||
|
||||
; FIXME: there is no AVX instruction for these, but we could be clever
|
||||
; by packing the bits down and setting the last 3/4 or half, respectively,
|
||||
; of the mask to zero... Not sure if this would be a win in the end
|
||||
gen_masked_store(8, i8, 8)
|
||||
gen_masked_store(8, i16, 16)
|
||||
gen_masked_store(i8)
|
||||
gen_masked_store(i16)
|
||||
|
||||
; note that mask is the 2nd parameter, not the 3rd one!!
|
||||
declare void @llvm.x86.avx.maskstore.ps.256(i8 *, <8 x float>, <8 x float>)
|
||||
declare void @llvm.x86.avx.maskstore.pd.256(i8 *, <4 x double>, <4 x double>)
|
||||
declare void @llvm.x86.avx.maskstore.ps.256(i8 *, <8 x MfORi32>, <8 x float>)
|
||||
declare void @llvm.x86.avx.maskstore.pd.256(i8 *, <4 x MdORi64>, <4 x double>)
|
||||
|
||||
define void @__masked_store_32(<8 x i32>* nocapture, <8 x i32>,
|
||||
<8 x i32>) nounwind alwaysinline {
|
||||
define void @__masked_store_i32(<8 x i32>* nocapture, <8 x i32>,
|
||||
<8 x i32>) nounwind alwaysinline {
|
||||
%ptr = bitcast <8 x i32> * %0 to i8 *
|
||||
%val = bitcast <8 x i32> %1 to <8 x float>
|
||||
%mask = bitcast <8 x i32> %2 to <8 x float>
|
||||
call void @llvm.x86.avx.maskstore.ps.256(i8 * %ptr, <8 x float> %mask, <8 x float> %val)
|
||||
%mask = bitcast <8 x i32> %2 to <8 x MfORi32>
|
||||
call void @llvm.x86.avx.maskstore.ps.256(i8 * %ptr, <8 x MfORi32> %mask, <8 x float> %val)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_64(<8 x i64>* nocapture, <8 x i64>,
|
||||
<8 x i32> %mask) nounwind alwaysinline {
|
||||
define void @__masked_store_i64(<8 x i64>* nocapture, <8 x i64>,
|
||||
<8 x i32> %mask) nounwind alwaysinline {
|
||||
%ptr = bitcast <8 x i64> * %0 to i8 *
|
||||
%val = bitcast <8 x i64> %1 to <8 x double>
|
||||
|
||||
@@ -432,31 +460,34 @@ define void @__masked_store_64(<8 x i64>* nocapture, <8 x i64>,
|
||||
%mask1 = shufflevector <8 x i32> %mask, <8 x i32> undef,
|
||||
<8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
|
||||
|
||||
%mask0d = bitcast <8 x i32> %mask0 to <4 x double>
|
||||
%mask1d = bitcast <8 x i32> %mask1 to <4 x double>
|
||||
%mask0d = bitcast <8 x i32> %mask0 to <4 x MdORi64>
|
||||
%mask1d = bitcast <8 x i32> %mask1 to <4 x MdORi64>
|
||||
|
||||
%val0 = shufflevector <8 x double> %val, <8 x double> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%val1 = shufflevector <8 x double> %val, <8 x double> undef,
|
||||
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr, <4 x double> %mask0d, <4 x double> %val0)
|
||||
%ptr1 = getelementptr i8 * %ptr, i32 32
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr1, <4 x double> %mask1d, <4 x double> %val1)
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr, <4 x MdORi64> %mask0d, <4 x double> %val0)
|
||||
%ptr1 = getelementptr PTR_OP_ARGS(`i8') %ptr, i32 32
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr1, <4 x MdORi64> %mask1d, <4 x double> %val1)
|
||||
ret void
|
||||
}
|
||||
|
||||
masked_store_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store blend
|
||||
|
||||
masked_store_blend_8_16_by_8()
|
||||
|
||||
declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>,
|
||||
<8 x float>) nounwind readnone
|
||||
|
||||
define void @__masked_store_blend_32(<8 x i32>* nocapture, <8 x i32>,
|
||||
<8 x i32>) nounwind alwaysinline {
|
||||
define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,
|
||||
<8 x i32>) nounwind alwaysinline {
|
||||
%mask_as_float = bitcast <8 x i32> %2 to <8 x float>
|
||||
%oldValue = load <8 x i32>* %0, align 4
|
||||
%oldValue = load PTR_OP_ARGS(`<8 x i32>') %0, align 4
|
||||
%oldAsFloat = bitcast <8 x i32> %oldValue to <8 x float>
|
||||
%newAsFloat = bitcast <8 x i32> %1 to <8 x float>
|
||||
%blend = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %oldAsFloat,
|
||||
@@ -468,9 +499,9 @@ define void @__masked_store_blend_32(<8 x i32>* nocapture, <8 x i32>,
|
||||
}
|
||||
|
||||
|
||||
define void @__masked_store_blend_64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
|
||||
<8 x i32> %i32mask) nounwind alwaysinline {
|
||||
%oldValue = load <8 x i64>* %ptr, align 8
|
||||
define void @__masked_store_blend_i64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
|
||||
<8 x i32> %i32mask) nounwind alwaysinline {
|
||||
%oldValue = load PTR_OP_ARGS(`<8 x i64>') %ptr, align 8
|
||||
%mask = bitcast <8 x i32> %i32mask to <8 x float>
|
||||
|
||||
; Do 4x64-bit blends by doing two <8 x i32> blends, where the <8 x i32> values
|
||||
@@ -518,44 +549,21 @@ define void @__masked_store_blend_64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
|
||||
ret void
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; scatter
|
||||
|
||||
gen_scatter(i8)
|
||||
gen_scatter(i16)
|
||||
gen_scatter(i32)
|
||||
gen_scatter(float)
|
||||
gen_scatter(i64)
|
||||
gen_scatter(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather/scatter
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
gen_gather(8, i8)
|
||||
gen_gather(8, i16)
|
||||
gen_gather(8, i32)
|
||||
gen_gather(8, i64)
|
||||
|
||||
gen_scatter(8, i8)
|
||||
gen_scatter(8, i16)
|
||||
gen_scatter(8, i32)
|
||||
gen_scatter(8, i64)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
|
||||
declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone
|
||||
|
||||
define <8 x double> @__sqrt_varying_double(<8 x double>) nounwind alwaysinline {
|
||||
unary4to8(ret, double, @llvm.x86.avx.sqrt.pd.256, %0)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision min/max
|
||||
|
||||
declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwind readnone
|
||||
declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwind readnone
|
||||
|
||||
define <8 x double> @__min_varying_double(<8 x double>, <8 x double>) nounwind readnone alwaysinline {
|
||||
binary4to8(ret, double, @llvm.x86.avx.min.pd.256, %0, %1)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
|
||||
define <8 x double> @__max_varying_double(<8 x double>, <8 x double>) nounwind readnone alwaysinline {
|
||||
binary4to8(ret, double, @llvm.x86.avx.max.pd.256, %0, %1)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
|
||||
81
builtins/target-avx1-i64x4.ll
Normal file
81
builtins/target-avx1-i64x4.ll
Normal file
@@ -0,0 +1,81 @@
|
||||
;; Copyright (c) 2013, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
include(`target-avx1-i64x4base.ll')
|
||||
|
||||
rdrand_decls()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int min/max
|
||||
|
||||
define <4 x i32> @__min_varying_int32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %call
|
||||
}
|
||||
define <4 x i32> @__max_varying_int32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %0, <4 x i32> %1)
|
||||
|
||||
ret <4 x i32> %call
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unsigned int min/max
|
||||
|
||||
define <4 x i32> @__min_varying_uint32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %call
|
||||
}
|
||||
|
||||
define <4 x i32> @__max_varying_uint32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
ifelse(NO_HALF_DECLARES, `1', `', `
|
||||
declare float @__half_to_float_uniform(i16 %v) nounwind readnone
|
||||
declare <WIDTH x float> @__half_to_float_varying(<WIDTH x i16> %v) nounwind readnone
|
||||
declare i16 @__float_to_half_uniform(float %v) nounwind readnone
|
||||
declare <WIDTH x i16> @__float_to_half_varying(<WIDTH x float> %v) nounwind readnone
|
||||
')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather
|
||||
|
||||
gen_gather_factored(i8)
|
||||
gen_gather_factored(i16)
|
||||
gen_gather_factored(i32)
|
||||
gen_gather_factored(float)
|
||||
gen_gather_factored(i64)
|
||||
gen_gather_factored(double)
|
||||
519
builtins/target-avx1-i64x4base.ll
Normal file
519
builtins/target-avx1-i64x4base.ll
Normal file
@@ -0,0 +1,519 @@
|
||||
;; Copyright (c) 2013-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; Basic 4-wide definitions
|
||||
|
||||
define(`WIDTH',`4')
|
||||
define(`MASK',`i64')
|
||||
include(`util.m4')
|
||||
|
||||
stdlib_core()
|
||||
packed_load_and_store()
|
||||
scans()
|
||||
int64minmax()
|
||||
saturation_arithmetic()
|
||||
|
||||
include(`target-avx-common.ll')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp
|
||||
|
||||
;; sse intrinsic
|
||||
declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define <4 x float> @__rcp_varying_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
; float iv = __rcp_v(v);
|
||||
; return iv * (2. - v * iv);
|
||||
|
||||
%call = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %0)
|
||||
; do one N-R iteration
|
||||
%v_iv = fmul <4 x float> %0, %call
|
||||
%two_minus = fsub <4 x float> <float 2., float 2., float 2., float 2.>, %v_iv
|
||||
%iv_mul = fmul <4 x float> %call, %two_minus
|
||||
ret <4 x float> %iv_mul
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rounding floats
|
||||
|
||||
;; sse intrinsic
|
||||
declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
|
||||
|
||||
define <4 x float> @__round_varying_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
; roundps, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8
|
||||
%call = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %0, i32 8)
|
||||
ret <4 x float> %call
|
||||
}
|
||||
|
||||
define <4 x float> @__floor_varying_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
; roundps, round down 0b01 | don't signal precision exceptions 0b1001 = 9
|
||||
%call = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %0, i32 9)
|
||||
ret <4 x float> %call
|
||||
}
|
||||
|
||||
define <4 x float> @__ceil_varying_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
; roundps, round up 0b10 | don't signal precision exceptions 0b1010 = 10
|
||||
%call = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %0, i32 10)
|
||||
ret <4 x float> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rounding doubles
|
||||
|
||||
;; avx intrinsic
|
||||
declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind readnone
|
||||
|
||||
define <4 x double> @__round_varying_double(<4 x double>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %0, i32 8)
|
||||
ret <4 x double> %call
|
||||
}
|
||||
|
||||
define <4 x double> @__floor_varying_double(<4 x double>) nounwind readonly alwaysinline {
|
||||
; roundpd, round down 0b01 | don't signal precision exceptions 0b1000 = 9
|
||||
%call = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %0, i32 9)
|
||||
ret <4 x double> %call
|
||||
}
|
||||
|
||||
|
||||
define <4 x double> @__ceil_varying_double(<4 x double>) nounwind readonly alwaysinline {
|
||||
; roundpd, round up 0b10 | don't signal precision exceptions 0b1000 = 10
|
||||
%call = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %0, i32 10)
|
||||
ret <4 x double> %call
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rsqrt
|
||||
|
||||
;; sse intrinsic
|
||||
declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define <4 x float> @__rsqrt_varying_float(<4 x float> %v) nounwind readonly alwaysinline {
|
||||
; float is = __rsqrt_v(v);
|
||||
%is = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %v)
|
||||
; Newton-Raphson iteration to improve precision
|
||||
; return 0.5 * is * (3. - (v * is) * is);
|
||||
%v_is = fmul <4 x float> %v, %is
|
||||
%v_is_is = fmul <4 x float> %v_is, %is
|
||||
%three_sub = fsub <4 x float> <float 3., float 3., float 3., float 3.>, %v_is_is
|
||||
%is_mul = fmul <4 x float> %is, %three_sub
|
||||
%half_scale = fmul <4 x float> <float 0.5, float 0.5, float 0.5, float 0.5>, %is_mul
|
||||
ret <4 x float> %half_scale
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; sqrt
|
||||
|
||||
;; sse intrinsic
|
||||
declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define <4 x float> @__sqrt_varying_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %0)
|
||||
ret <4 x float> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
|
||||
;; avx<76> intrinsic
|
||||
declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone
|
||||
|
||||
define <4 x double> @__sqrt_varying_double(<4 x double>) nounwind alwaysinline {
|
||||
%call = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %0)
|
||||
ret <4 x double> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; svml
|
||||
|
||||
include(`svml.m4')
|
||||
;; single precision
|
||||
svml_declare(float,f4,4)
|
||||
svml_define(float,f4,4,f)
|
||||
|
||||
;; double precision
|
||||
svml_declare(double,4,4)
|
||||
svml_define(double,4,4,d)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float min/max
|
||||
|
||||
;; sse intrinsics
|
||||
declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define <4 x float> @__max_varying_float(<4 x float>, <4 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %0, <4 x float> %1)
|
||||
ret <4 x float> %call
|
||||
}
|
||||
|
||||
define <4 x float> @__min_varying_float(<4 x float>, <4 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %0, <4 x float> %1)
|
||||
ret <4 x float> %call
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; horizontal ops
|
||||
|
||||
;; sse intrinsic
|
||||
declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>) nounwind readnone
|
||||
|
||||
define i64 @__movmsk(<4 x i64>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i64> %0 to <4 x double>
|
||||
%v = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %floatmask) nounwind readnone
|
||||
%v64 = zext i32 %v to i64
|
||||
ret i64 %v64
|
||||
}
|
||||
|
||||
define i1 @__any(<4 x i64>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i64> %0 to <4 x double>
|
||||
%v = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %floatmask) nounwind readnone
|
||||
%cmp = icmp ne i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__all(<4 x i64>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i64> %0 to <4 x double>
|
||||
%v = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %floatmask) nounwind readnone
|
||||
%cmp = icmp eq i32 %v, 15
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__none(<4 x i64>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i64> %0 to <4 x double>
|
||||
%v = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %floatmask) nounwind readnone
|
||||
%cmp = icmp eq i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal float ops
|
||||
|
||||
;; sse intrinsic
|
||||
declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define float @__reduce_add_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
%v1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %0, <4 x float> %0)
|
||||
%v2 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %v1, <4 x float> %v1)
|
||||
%scalar = extractelement <4 x float> %v2, i32 0
|
||||
ret float %scalar
|
||||
}
|
||||
|
||||
define float @__reduce_min_float(<4 x float>) nounwind readnone {
|
||||
reduce4(float, @__min_varying_float, @__min_uniform_float)
|
||||
}
|
||||
|
||||
define float @__reduce_max_float(<4 x float>) nounwind readnone {
|
||||
reduce4(float, @__max_varying_float, @__max_uniform_float)
|
||||
}
|
||||
|
||||
reduce_equal(4)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int8 ops
|
||||
|
||||
declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<4 x i8>) nounwind readnone alwaysinline
|
||||
{
|
||||
%wide8 = shufflevector <4 x i8> %0, <4 x i8> zeroinitializer,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4,
|
||||
i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
|
||||
%rv = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %wide8,
|
||||
<16 x i8> zeroinitializer)
|
||||
%r0 = extractelement <2 x i64> %rv, i32 0
|
||||
%r1 = extractelement <2 x i64> %rv, i32 1
|
||||
%r = add i64 %r0, %r1
|
||||
%r16 = trunc i64 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int16 ops
|
||||
|
||||
define internal <4 x i16> @__add_varying_i16(<4 x i16>,
|
||||
<4 x i16>) nounwind readnone alwaysinline {
|
||||
%r = add <4 x i16> %0, %1
|
||||
ret <4 x i16> %r
|
||||
}
|
||||
|
||||
define internal i16 @__add_uniform_i16(i16, i16) nounwind readnone alwaysinline {
|
||||
%r = add i16 %0, %1
|
||||
ret i16 %r
|
||||
}
|
||||
|
||||
define i16 @__reduce_add_int16(<4 x i16>) nounwind readnone alwaysinline {
|
||||
reduce4(i16, @__add_varying_i16, @__add_uniform_i16)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int32 ops
|
||||
|
||||
define <4 x i32> @__add_varying_int32(<4 x i32>,
|
||||
<4 x i32>) nounwind readnone alwaysinline {
|
||||
%s = add <4 x i32> %0, %1
|
||||
ret <4 x i32> %s
|
||||
}
|
||||
|
||||
define i32 @__add_uniform_int32(i32, i32) nounwind readnone alwaysinline {
|
||||
%s = add i32 %0, %1
|
||||
ret i32 %s
|
||||
}
|
||||
|
||||
define i32 @__reduce_add_int32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
reduce4(i32, @__add_varying_int32, @__add_uniform_int32)
|
||||
}
|
||||
|
||||
|
||||
define i32 @__reduce_min_int32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
reduce4(i32, @__min_varying_int32, @__min_uniform_int32)
|
||||
}
|
||||
|
||||
|
||||
define i32 @__reduce_max_int32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
reduce4(i32, @__max_varying_int32, @__max_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
reduce4(i32, @__min_varying_uint32, @__min_uniform_uint32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_uint32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
reduce4(i32, @__max_varying_uint32, @__max_uniform_uint32)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal double ops
|
||||
|
||||
declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounwind readnone
|
||||
|
||||
define double @__reduce_add_double(<4 x double>) nounwind readonly alwaysinline {
|
||||
%v0 = shufflevector <4 x double> %0, <4 x double> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%v1 = shufflevector <4 x double> <double 0.,double 0.,double 0.,double 0.>, <4 x double> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
;; %v1 = <4 x double> <double 0., double 0., double 0., double 0.>
|
||||
%sum0 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %v0, <4 x double> %v1)
|
||||
%sum1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %sum0, <4 x double> %sum0)
|
||||
%final0 = extractelement <4 x double> %sum1, i32 0
|
||||
%final1 = extractelement <4 x double> %sum1, i32 2
|
||||
%sum = fadd double %final0, %final1
|
||||
|
||||
ret double %sum
|
||||
}
|
||||
|
||||
define double @__reduce_min_double(<4 x double>) nounwind readnone alwaysinline {
|
||||
reduce4(double, @__min_varying_double, @__min_uniform_double)
|
||||
}
|
||||
|
||||
|
||||
define double @__reduce_max_double(<4 x double>) nounwind readnone alwaysinline {
|
||||
reduce4(double, @__max_varying_double, @__max_uniform_double)
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int64 ops
|
||||
|
||||
define <4 x i64> @__add_varying_int64(<4 x i64>,
|
||||
<4 x i64>) nounwind readnone alwaysinline {
|
||||
%s = add <4 x i64> %0, %1
|
||||
ret <4 x i64> %s
|
||||
}
|
||||
|
||||
define i64 @__add_uniform_int64(i64, i64) nounwind readnone alwaysinline {
|
||||
%s = add i64 %0, %1
|
||||
ret i64 %s
|
||||
}
|
||||
|
||||
define i64 @__reduce_add_int64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__add_varying_int64, @__add_uniform_int64)
|
||||
}
|
||||
|
||||
|
||||
define i64 @__reduce_min_int64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__min_varying_int64, @__min_uniform_int64)
|
||||
}
|
||||
|
||||
|
||||
define i64 @__reduce_max_int64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__max_varying_int64, @__max_uniform_int64)
|
||||
}
|
||||
|
||||
|
||||
define i64 @__reduce_min_uint64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__min_varying_uint64, @__min_uniform_uint64)
|
||||
}
|
||||
|
||||
|
||||
define i64 @__reduce_max_uint64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__max_varying_uint64, @__max_uniform_uint64)
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
|
||||
; no masked load instruction for i8 and i16 types??
|
||||
masked_load(i8, 1)
|
||||
masked_load(i16, 2)
|
||||
|
||||
;; avx intrinsics
|
||||
declare <4 x float> @llvm.x86.avx.maskload.ps(i8 *, <4 x MfORi32> %mask)
|
||||
declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8 *, <4 x MdORi64> %mask)
|
||||
|
||||
define <4 x i32> @__masked_load_i32(i8 *, <4 x i64> %mask64) nounwind alwaysinline {
|
||||
%mask = trunc <4 x i64> %mask64 to <4 x i32>
|
||||
%floatmask = bitcast <4 x i32> %mask to <4 x MfORi32>
|
||||
%floatval = call <4 x float> @llvm.x86.avx.maskload.ps(i8 * %0, <4 x MfORi32> %floatmask)
|
||||
%retval = bitcast <4 x float> %floatval to <4 x i32>
|
||||
ret <4 x i32> %retval
|
||||
}
|
||||
|
||||
|
||||
define <4 x i64> @__masked_load_i64(i8 *, <4 x i64> %mask) nounwind alwaysinline {
|
||||
%doublemask = bitcast <4 x i64> %mask to <4 x MdORi64>
|
||||
%doubleval = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %0, <4 x MdORi64> %doublemask)
|
||||
%retval = bitcast <4 x double> %doubleval to <4 x i64>
|
||||
ret <4 x i64> %retval
|
||||
}
|
||||
|
||||
masked_load_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store
|
||||
|
||||
gen_masked_store(i8)
|
||||
gen_masked_store(i16)
|
||||
|
||||
; note that mask is the 2nd parameter, not the 3rd one!!
|
||||
;; avx intrinsics
|
||||
declare void @llvm.x86.avx.maskstore.ps (i8 *, <4 x MfORi32>, <4 x float>)
|
||||
declare void @llvm.x86.avx.maskstore.pd.256(i8 *, <4 x MdORi64>, <4 x double>)
|
||||
|
||||
define void @__masked_store_i32(<4 x i32>* nocapture, <4 x i32>,
|
||||
<4 x i64>) nounwind alwaysinline {
|
||||
%mask32 = trunc <4 x i64> %2 to <4 x i32>
|
||||
|
||||
%ptr = bitcast <4 x i32> * %0 to i8 *
|
||||
%val = bitcast <4 x i32> %1 to <4 x float>
|
||||
%mask = bitcast <4 x i32> %mask32 to <4 x MfORi32>
|
||||
call void @llvm.x86.avx.maskstore.ps(i8 * %ptr, <4 x MfORi32> %mask, <4 x float> %val)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_i64(<4 x i64>* nocapture, <4 x i64>,
|
||||
<4 x i64>) nounwind alwaysinline {
|
||||
%ptr = bitcast <4 x i64> * %0 to i8 *
|
||||
%val = bitcast <4 x i64> %1 to <4 x double>
|
||||
%mask = bitcast <4 x i64> %2 to <4 x MdORi64>
|
||||
call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr, <4 x MdORi64> %mask, <4 x double> %val)
|
||||
ret void
|
||||
}
|
||||
|
||||
|
||||
masked_store_blend_8_16_by_4_mask64()
|
||||
|
||||
;; sse intrinsic
|
||||
declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>,
|
||||
<4 x float>) nounwind readnone
|
||||
|
||||
define void @__masked_store_blend_i32(<4 x i32>* nocapture, <4 x i32>,
|
||||
<4 x i64>) nounwind alwaysinline {
|
||||
%mask = trunc <4 x i64> %2 to <4 x i32>
|
||||
%mask_as_float = bitcast <4 x i32> %mask to <4 x float>
|
||||
%oldValue = load PTR_OP_ARGS(` <4 x i32>') %0, align 4
|
||||
%oldAsFloat = bitcast <4 x i32> %oldValue to <4 x float>
|
||||
%newAsFloat = bitcast <4 x i32> %1 to <4 x float>
|
||||
%blend = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %oldAsFloat,
|
||||
<4 x float> %newAsFloat,
|
||||
<4 x float> %mask_as_float)
|
||||
%blendAsInt = bitcast <4 x float> %blend to <4 x i32>
|
||||
store <4 x i32> %blendAsInt, <4 x i32>* %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
;; avx intrinsic
|
||||
declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>,
|
||||
<4 x double>) nounwind readnone
|
||||
|
||||
define void @__masked_store_blend_i64(<4 x i64>* nocapture , <4 x i64>,
|
||||
<4 x i64>) nounwind alwaysinline {
|
||||
%mask_as_double = bitcast <4 x i64> %2 to <4 x double>
|
||||
%oldValue = load PTR_OP_ARGS(` <4 x i64>') %0, align 4
|
||||
%oldAsDouble = bitcast <4 x i64> %oldValue to <4 x double>
|
||||
%newAsDouble = bitcast <4 x i64> %1 to <4 x double>
|
||||
%blend = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %oldAsDouble,
|
||||
<4 x double> %newAsDouble,
|
||||
<4 x double> %mask_as_double)
|
||||
%blendAsInt = bitcast <4 x double> %blend to <4 x i64>
|
||||
store <4 x i64> %blendAsInt, <4 x i64>* %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
masked_store_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; scatter
|
||||
|
||||
gen_scatter(i8)
|
||||
gen_scatter(i16)
|
||||
gen_scatter(i32)
|
||||
gen_scatter(float)
|
||||
gen_scatter(i64)
|
||||
gen_scatter(double)
|
||||
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision min/max
|
||||
|
||||
declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwind readnone
|
||||
declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwind readnone
|
||||
|
||||
define <4 x double> @__min_varying_double(<4 x double>, <4 x double>) nounwind readnone alwaysinline {
|
||||
%call = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %0, <4 x double> %1)
|
||||
ret <4 x double> %call
|
||||
}
|
||||
|
||||
define <4 x double> @__max_varying_double(<4 x double>, <4 x double>) nounwind readnone alwaysinline {
|
||||
%call = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %0, <4 x double> %1)
|
||||
ret <4 x double> %call
|
||||
}
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
81
builtins/target-avx1-x2.ll
Normal file
81
builtins/target-avx1-x2.ll
Normal file
@@ -0,0 +1,81 @@
|
||||
;; Copyright (c) 2010-2012, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
include(`target-avx-x2.ll')
|
||||
|
||||
rdrand_decls()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int min/max
|
||||
|
||||
define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pminsd, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pmaxsd, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unsigned int min/max
|
||||
|
||||
define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pminud, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pmaxud, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
ifelse(NO_HALF_DECLARES, `1', `', `
|
||||
declare float @__half_to_float_uniform(i16 %v) nounwind readnone
|
||||
declare <WIDTH x float> @__half_to_float_varying(<WIDTH x i16> %v) nounwind readnone
|
||||
declare i16 @__float_to_half_uniform(float %v) nounwind readnone
|
||||
declare <WIDTH x i16> @__float_to_half_varying(<WIDTH x float> %v) nounwind readnone
|
||||
')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather
|
||||
|
||||
gen_gather_factored(i8)
|
||||
gen_gather_factored(i16)
|
||||
gen_gather_factored(i32)
|
||||
gen_gather_factored(float)
|
||||
gen_gather_factored(i64)
|
||||
gen_gather_factored(double)
|
||||
82
builtins/target-avx1.ll
Normal file
82
builtins/target-avx1.ll
Normal file
@@ -0,0 +1,82 @@
|
||||
;; Copyright (c) 2010-2013, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
include(`target-avx.ll')
|
||||
|
||||
rdrand_decls()
|
||||
saturation_arithmetic()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int min/max
|
||||
|
||||
define <8 x i32> @__min_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pminsd, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
}
|
||||
|
||||
define <8 x i32> @__max_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pmaxsd, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unsigned int min/max
|
||||
|
||||
define <8 x i32> @__min_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pminud, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
}
|
||||
|
||||
define <8 x i32> @__max_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pmaxud, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
ifelse(NO_HALF_DECLARES, `1', `', `
|
||||
declare float @__half_to_float_uniform(i16 %v) nounwind readnone
|
||||
declare <WIDTH x float> @__half_to_float_varying(<WIDTH x i16> %v) nounwind readnone
|
||||
declare i16 @__float_to_half_uniform(float %v) nounwind readnone
|
||||
declare <WIDTH x i16> @__float_to_half_varying(<WIDTH x float> %v) nounwind readnone
|
||||
')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather
|
||||
|
||||
gen_gather_factored(i8)
|
||||
gen_gather_factored(i16)
|
||||
gen_gather_factored(i32)
|
||||
gen_gather_factored(float)
|
||||
gen_gather_factored(i64)
|
||||
gen_gather_factored(double)
|
||||
119
builtins/target-avx11-i64x4.ll
Normal file
119
builtins/target-avx11-i64x4.ll
Normal file
@@ -0,0 +1,119 @@
|
||||
;; Copyright (c) 2013, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
include(`target-avx1-i64x4base.ll')
|
||||
|
||||
rdrand_definition()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int min/max
|
||||
|
||||
define <4 x i32> @__min_varying_int32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
|
||||
%m = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %m
|
||||
}
|
||||
|
||||
define <4 x i32> @__max_varying_int32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
|
||||
%m = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %m
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unsigned int min/max
|
||||
|
||||
define <4 x i32> @__min_varying_uint32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
|
||||
%m = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %m
|
||||
}
|
||||
|
||||
define <4 x i32> @__max_varying_uint32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
|
||||
%m = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %m
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather
|
||||
|
||||
gen_gather(i8)
|
||||
gen_gather(i16)
|
||||
gen_gather(i32)
|
||||
gen_gather(float)
|
||||
gen_gather(i64)
|
||||
gen_gather(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float/half conversions
|
||||
|
||||
define(`expand_4to8', `
|
||||
%$3 = shufflevector <4 x $1> %$2, <4 x $1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
|
||||
')
|
||||
define(`extract_4from8', `
|
||||
%$3 = shufflevector <8 x $1> %$2, <8 x $1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
')
|
||||
|
||||
declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone
|
||||
; 0 is round nearest even
|
||||
declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone
|
||||
|
||||
define <4 x float> @__half_to_float_varying(<4 x i16> %v4) nounwind readnone {
|
||||
expand_4to8(i16, v4, v)
|
||||
%r = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %v)
|
||||
extract_4from8(float, r, ret)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x i16> @__float_to_half_varying(<4 x float> %v4) nounwind readnone {
|
||||
expand_4to8(float, v4, v)
|
||||
%r = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %v, i32 0)
|
||||
extract_4from8(i16, r, ret)
|
||||
ret <4 x i16> %ret
|
||||
}
|
||||
|
||||
define float @__half_to_float_uniform(i16 %v) nounwind readnone {
|
||||
%v1 = bitcast i16 %v to <1 x i16>
|
||||
%vv = shufflevector <1 x i16> %v1, <1 x i16> undef,
|
||||
<8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
%rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv)
|
||||
%r = extractelement <8 x float> %rv, i32 0
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define i16 @__float_to_half_uniform(float %v) nounwind readnone {
|
||||
%v1 = bitcast float %v to <1 x float>
|
||||
%vv = shufflevector <1 x float> %v1, <1 x float> undef,
|
||||
<8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
; round to nearest even
|
||||
%rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0)
|
||||
%r = extractelement <8 x i16> %rv, i32 0
|
||||
ret i16 %r
|
||||
}
|
||||
125
builtins/target-avx11-x2.ll
Normal file
125
builtins/target-avx11-x2.ll
Normal file
@@ -0,0 +1,125 @@
|
||||
;; Copyright (c) 2012-2013, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
include(`target-avx-x2.ll')
|
||||
|
||||
rdrand_definition()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int min/max
|
||||
|
||||
define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pminsd, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pmaxsd, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unsigned int min/max
|
||||
|
||||
define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pminud, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(ret, i32, @llvm.x86.sse41.pmaxud, %0, %1)
|
||||
ret <16 x i32> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather
|
||||
|
||||
gen_gather(i8)
|
||||
gen_gather(i16)
|
||||
gen_gather(i32)
|
||||
gen_gather(float)
|
||||
gen_gather(i64)
|
||||
gen_gather(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float/half conversions
|
||||
|
||||
declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone
|
||||
; 0 is round nearest even
|
||||
declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone
|
||||
|
||||
define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone {
|
||||
%r_0 = shufflevector <16 x i16> %v, <16 x i16> undef,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
%vr_0 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_0)
|
||||
%r_1 = shufflevector <16 x i16> %v, <16 x i16> undef,
|
||||
<8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
%vr_1 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_1)
|
||||
%r = shufflevector <8 x float> %vr_0, <8 x float> %vr_1,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
|
||||
i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
ret <16 x float> %r
|
||||
}
|
||||
|
||||
define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone {
|
||||
%r_0 = shufflevector <16 x float> %v, <16 x float> undef,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
%vr_0 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_0, i32 0)
|
||||
%r_1 = shufflevector <16 x float> %v, <16 x float> undef,
|
||||
<8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
%vr_1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_1, i32 0)
|
||||
%r = shufflevector <8 x i16> %vr_0, <8 x i16> %vr_1,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
|
||||
i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
ret <16 x i16> %r
|
||||
}
|
||||
|
||||
define float @__half_to_float_uniform(i16 %v) nounwind readnone {
|
||||
%v1 = bitcast i16 %v to <1 x i16>
|
||||
%vv = shufflevector <1 x i16> %v1, <1 x i16> undef,
|
||||
<8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
%rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv)
|
||||
%r = extractelement <8 x float> %rv, i32 0
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define i16 @__float_to_half_uniform(float %v) nounwind readnone {
|
||||
%v1 = bitcast float %v to <1 x float>
|
||||
%vv = shufflevector <1 x float> %v1, <1 x float> undef,
|
||||
<8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
; round to nearest even
|
||||
%rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0)
|
||||
%r = extractelement <8 x i16> %rv, i32 0
|
||||
ret i16 %r
|
||||
}
|
||||
110
builtins/target-avx11.ll
Normal file
110
builtins/target-avx11.ll
Normal file
@@ -0,0 +1,110 @@
|
||||
;; Copyright (c) 2012-2013, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
include(`target-avx.ll')
|
||||
|
||||
rdrand_definition()
|
||||
saturation_arithmetic()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int min/max
|
||||
|
||||
define <8 x i32> @__min_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pminsd, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
}
|
||||
|
||||
define <8 x i32> @__max_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pmaxsd, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unsigned int min/max
|
||||
|
||||
define <8 x i32> @__min_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pminud, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
}
|
||||
|
||||
define <8 x i32> @__max_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(ret, i32, @llvm.x86.sse41.pmaxud, %0, %1)
|
||||
ret <8 x i32> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather
|
||||
|
||||
gen_gather(i8)
|
||||
gen_gather(i16)
|
||||
gen_gather(i32)
|
||||
gen_gather(float)
|
||||
gen_gather(i64)
|
||||
gen_gather(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float/half conversions
|
||||
|
||||
declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone
|
||||
; 0 is round nearest even
|
||||
declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone
|
||||
|
||||
define <8 x float> @__half_to_float_varying(<8 x i16> %v) nounwind readnone {
|
||||
%r = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %v)
|
||||
ret <8 x float> %r
|
||||
}
|
||||
|
||||
define <8 x i16> @__float_to_half_varying(<8 x float> %v) nounwind readnone {
|
||||
%r = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %v, i32 0)
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
define float @__half_to_float_uniform(i16 %v) nounwind readnone {
|
||||
%v1 = bitcast i16 %v to <1 x i16>
|
||||
%vv = shufflevector <1 x i16> %v1, <1 x i16> undef,
|
||||
<8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
%rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv)
|
||||
%r = extractelement <8 x float> %rv, i32 0
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define i16 @__float_to_half_uniform(float %v) nounwind readnone {
|
||||
%v1 = bitcast float %v to <1 x float>
|
||||
%vv = shufflevector <1 x float> %v1, <1 x float> undef,
|
||||
<8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
; round to nearest even
|
||||
%rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0)
|
||||
%r = extractelement <8 x i16> %rv, i32 0
|
||||
ret i16 %r
|
||||
}
|
||||
;; --- new file: builtins/target-avx2-i64x4.ll (342 lines, @@ -0,0 +1,342 @@) ---
|
||||
;; Copyright (c) 2013, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
;; This target has hardware gather support (AVX2).
define(`HAVE_GATHER', `1')

;; Build on the AVX1 4-wide/64-bit-mask base target; this file only
;; overrides the pieces AVX2 improves on.
include(`target-avx1-i64x4base.ll')

;; rdrand_definition() is an m4 macro (defined in the shared builtins) that
;; presumably emits the RDRAND-based random number builtins.
rdrand_definition()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; int min/max

;; NOTE(review): the pminsd/pmaxsd declarations are commented out here --
;; presumably the included base target already declares them; confirm,
;; since calling an undeclared intrinsic is a module verifier error.
;; declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
;; declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readonly

;; Lane-wise signed i32 minimum.
define <4 x i32> @__min_varying_int32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
  %m = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %0, <4 x i32> %1)
  ret <4 x i32> %m
}

;; Lane-wise signed i32 maximum.
define <4 x i32> @__max_varying_int32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
  %m = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %0, <4 x i32> %1)
  ret <4 x i32> %m
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; unsigned int min/max

;; NOTE(review): as with the signed variants above, the declarations are
;; commented out; presumably provided by the included base target.
;; declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readonly
;; declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readonly

;; Lane-wise unsigned i32 minimum.
define <4 x i32> @__min_varying_uint32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
  %m = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %0, <4 x i32> %1)
  ret <4 x i32> %m
}

;; Lane-wise unsigned i32 maximum.
define <4 x i32> @__max_varying_uint32(<4 x i32>, <4 x i32>) nounwind readonly alwaysinline {
  %m = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %0, <4 x i32> %1)
  ret <4 x i32> %m
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; float/half conversions
;;
;; The F16C conversion instructions are 8-wide only, so the 4-wide varying
;; operations widen to 8 lanes, convert, and take the low 4 lanes back.

;; expand_4to8(type, src, dst): widen a 4-vector to an 8-vector; the upper
;; four lanes just replicate the input and their results are discarded.
define(`expand_4to8', `
  %$3 = shufflevector <4 x $1> %$2, <4 x $1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
')
;; extract_4from8(type, src, dst): take the low four lanes of an 8-vector.
define(`extract_4from8', `
  %$3 = shufflevector <8 x $1> %$2, <8 x $1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
')

declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone
; 0 is round nearest even
declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone

;; Convert 4 half-precision values to float (widen / convert / narrow).
define <4 x float> @__half_to_float_varying(<4 x i16> %v4) nounwind readnone {
  expand_4to8(i16, v4, v)
  %r = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %v)
  extract_4from8(float, r, ret)
  ret <4 x float> %ret
}

;; Convert 4 floats to half precision, rounding to nearest even.
define <4 x i16> @__float_to_half_varying(<4 x float> %v4) nounwind readnone {
  expand_4to8(float, v4, v)
  %r = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %v, i32 0)
  extract_4from8(i16, r, ret)
  ret <4 x i16> %ret
}

;; Scalar half->float: place the scalar in lane 0 of an 8-wide vector,
;; convert, extract lane 0.
define float @__half_to_float_uniform(i16 %v) nounwind readnone {
  %v1 = bitcast i16 %v to <1 x i16>
  %vv = shufflevector <1 x i16> %v1, <1 x i16> undef,
          <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
                     i32 undef, i32 undef, i32 undef, i32 undef>
  %rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv)
  %r = extractelement <8 x float> %rv, i32 0
  ret float %r
}

;; Scalar float->half, same widen/convert/extract pattern as above.
define i16 @__float_to_half_uniform(float %v) nounwind readnone {
  %v1 = bitcast float %v to <1 x float>
  %vv = shufflevector <1 x float> %v1, <1 x float> undef,
          <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
                     i32 undef, i32 undef, i32 undef, i32 undef>
  ; round to nearest even
  %rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0)
  %r = extractelement <8 x i16> %rv, i32 0
  ret i16 %r
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; gather

declare void @llvm.trap() noreturn nounwind

;; AVX2 gather instructions only handle 32/64-bit elements; use the generic
;; gen_gather() m4 macro (defined elsewhere in the shared builtins) for the
;; 8- and 16-bit element types.  The wider types use native gathers below.
gen_gather(i8)
gen_gather(i16)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; int32 gathers
;;
;; VPGATHER* only loads lanes whose per-lane mask has the sign bit set, so
;; inactive program instances never touch memory.  The target's 64-bit
;; execution mask is truncated to the 32-bit mask the i32 gathers expect.

declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %target, i8 * %ptr,
                      <4 x i32> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind
declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %target, i8 * %ptr,
                      <4 x i64> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind

;; Gather 4 x i32 from %ptr + %offsets * %scale (32-bit offsets).
define <4 x i32> @__gather_base_offsets32_i32(i8 * %ptr,
                             i32 %scale, <4 x i32> %offsets,
                             <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask = trunc <4 x i64> %vecmask64 to <4 x i32>

  %v = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> undef, i8 * %ptr,
                      <4 x i32> %offsets, <4 x i32> %vecmask, i8 %scale8)
  ret <4 x i32> %v
}

;; Gather 4 x i32 from %ptr + %offsets * %scale (64-bit offsets).
define <4 x i32> @__gather_base_offsets64_i32(i8 * %ptr,
                             i32 %scale, <4 x i64> %offsets,
                             <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask = trunc <4 x i64> %vecmask64 to <4 x i32>

  %v = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
                      <4 x i64> %offsets, <4 x i32> %vecmask, i8 %scale8)
  ret <4 x i32> %v
}

;; Gather 4 x i32 through 32-bit absolute pointers (null base, scale 1).
define <4 x i32> @__gather32_i32(<4 x i32> %ptrs,
                                 <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %vecmask = trunc <4 x i64> %vecmask64 to <4 x i32>

  %v = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> undef, i8 * null,
                      <4 x i32> %ptrs, <4 x i32> %vecmask, i8 1)
  ret <4 x i32> %v
}

;; Gather 4 x i32 through 64-bit absolute pointers (null base, scale 1).
define <4 x i32> @__gather64_i32(<4 x i64> %ptrs,
                                 <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %vecmask = trunc <4 x i64> %vecmask64 to <4 x i32>

  %v = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
                      <4 x i64> %ptrs, <4 x i32> %vecmask, i8 1)
  ret <4 x i32> %v
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; float gathers
;;
;; Same structure as the i32 gathers; the truncated i32 mask is bitcast to
;; float because the float-gather intrinsics take a float-typed mask (only
;; the per-lane sign bit matters).

declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %target, i8 * %ptr,
                      <4 x i32> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind
declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %target, i8 * %ptr,
                      <4 x i64> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind

;; Gather 4 x float from %ptr + %offsets * %scale (32-bit offsets).
define <4 x float> @__gather_base_offsets32_float(i8 * %ptr,
                             i32 %scale, <4 x i32> %offsets,
                             <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask = trunc <4 x i64> %vecmask64 to <4 x i32>
  %mask = bitcast <4 x i32> %vecmask to <4 x float>

  %v = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> undef, i8 * %ptr,
                      <4 x i32> %offsets, <4 x float> %mask, i8 %scale8)
  ret <4 x float> %v
}

;; Gather 4 x float from %ptr + %offsets * %scale (64-bit offsets).
define <4 x float> @__gather_base_offsets64_float(i8 * %ptr,
                             i32 %scale, <4 x i64> %offsets,
                             <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask = trunc <4 x i64> %vecmask64 to <4 x i32>
  %mask = bitcast <4 x i32> %vecmask to <4 x float>

  %v = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
                      <4 x i64> %offsets, <4 x float> %mask, i8 %scale8)
  ret <4 x float> %v
}

;; Gather 4 x float through 32-bit absolute pointers (null base, scale 1).
define <4 x float> @__gather32_float(<4 x i32> %ptrs,
                                     <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %vecmask = trunc <4 x i64> %vecmask64 to <4 x i32>
  %mask = bitcast <4 x i32> %vecmask to <4 x float>

  %v = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> undef, i8 * null,
                      <4 x i32> %ptrs, <4 x float> %mask, i8 1)
  ret <4 x float> %v
}

;; Gather 4 x float through 64-bit absolute pointers (null base, scale 1).
define <4 x float> @__gather64_float(<4 x i64> %ptrs,
                                     <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %vecmask = trunc <4 x i64> %vecmask64 to <4 x i32>
  %mask = bitcast <4 x i32> %vecmask to <4 x float>

  %v = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
                      <4 x i64> %ptrs, <4 x float> %mask, i8 1)
  ret <4 x float> %v
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; int64 gathers
;;
;; The 64-bit-element gathers take a 64-bit per-lane mask, which matches
;; this target's execution mask directly -- no conversion needed.

declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %target, i8 * %ptr,
                      <4 x i32> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind
declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %target, i8 * %ptr,
                      <4 x i64> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind

;; Gather 4 x i64 from %ptr + %offsets * %scale (32-bit offsets).
define <4 x i64> @__gather_base_offsets32_i64(i8 * %ptr,
                             i32 %scale, <4 x i32> %offsets,
                             <4 x i64> %vecmask) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8

  %v = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
                      <4 x i32> %offsets, <4 x i64> %vecmask, i8 %scale8)
  ret <4 x i64> %v
}

;; Gather 4 x i64 from %ptr + %offsets * %scale (64-bit offsets).
define <4 x i64> @__gather_base_offsets64_i64(i8 * %ptr,
                             i32 %scale, <4 x i64> %offsets,
                             <4 x i64> %vecmask) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8

  %v = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
                      <4 x i64> %offsets, <4 x i64> %vecmask, i8 %scale8)
  ret <4 x i64> %v
}

;; Gather 4 x i64 through 32-bit absolute pointers (null base, scale 1).
define <4 x i64> @__gather32_i64(<4 x i32> %ptrs,
                                 <4 x i64> %vecmask) nounwind readonly alwaysinline {
  %v = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
                      <4 x i32> %ptrs, <4 x i64> %vecmask, i8 1)
  ret <4 x i64> %v
}

;; Gather 4 x i64 through 64-bit absolute pointers (null base, scale 1).
define <4 x i64> @__gather64_i64(<4 x i64> %ptrs,
                                 <4 x i64> %vecmask) nounwind readonly alwaysinline {
  %v = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
                      <4 x i64> %ptrs, <4 x i64> %vecmask, i8 1)
  ret <4 x i64> %v
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; double gathers
;;
;; The 64-bit execution mask is bitcast to double because the double-gather
;; intrinsics take a double-typed mask (only the per-lane sign bit matters).

declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %target, i8 * %ptr,
                      <4 x i64> %indices, <4 x double> %mask, i8 %scale) readonly nounwind
declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %target, i8 * %ptr,
                      <4 x i32> %indices, <4 x double> %mask, i8 %scale) readonly nounwind

;; Gather 4 x double from %ptr + %offsets * %scale (32-bit offsets).
define <4 x double> @__gather_base_offsets32_double(i8 * %ptr,
                             i32 %scale, <4 x i32> %offsets,
                             <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask = bitcast <4 x i64> %vecmask64 to <4 x double>

  %v = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
                      <4 x i32> %offsets, <4 x double> %vecmask, i8 %scale8)
  ret <4 x double> %v
}

;; Gather 4 x double from %ptr + %offsets * %scale (64-bit offsets).
define <4 x double> @__gather_base_offsets64_double(i8 * %ptr,
                             i32 %scale, <4 x i64> %offsets,
                             <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask = bitcast <4 x i64> %vecmask64 to <4 x double>

  %v = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
                      <4 x i64> %offsets, <4 x double> %vecmask, i8 %scale8)
  ret <4 x double> %v
}

;; Gather 4 x double through 32-bit absolute pointers (null base, scale 1).
define <4 x double> @__gather32_double(<4 x i32> %ptrs,
                                       <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %vecmask = bitcast <4 x i64> %vecmask64 to <4 x double>

  %v = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
                      <4 x i32> %ptrs, <4 x double> %vecmask, i8 1)
  ret <4 x double> %v
}

;; Gather 4 x double through 64-bit absolute pointers (null base, scale 1).
define <4 x double> @__gather64_double(<4 x i64> %ptrs,
                                       <4 x i64> %vecmask64) nounwind readonly alwaysinline {
  %vecmask = bitcast <4 x i64> %vecmask64 to <4 x double>

  %v = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
                      <4 x i64> %ptrs, <4 x double> %vecmask, i8 1)
  ret <4 x double> %v
}
|
||||
;; --- new file: builtins/target-avx2-x2.ll (538 lines, @@ -0,0 +1,538 @@) ---
|
||||
;; Copyright (c) 2010-2013, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
;; This target has hardware gather support (AVX2).
define(`HAVE_GATHER', `1')

;; Build on the 16-wide AVX target; this file only overrides the pieces
;; AVX2 improves on.
include(`target-avx-x2.ll')

;; rdrand_definition() is an m4 macro (defined in the shared builtins) that
;; presumably emits the RDRAND-based random number builtins.
rdrand_definition()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; int min/max

declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readonly
declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readonly

;; Lane-wise signed i32 minimum; binary8to16() (an m4 helper from the
;; included target) applies the 8-wide intrinsic to both halves of the
;; 16-wide operands and names the joined result %m.
define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
  binary8to16(m, i32, @llvm.x86.avx2.pmins.d, %0, %1)
  ret <16 x i32> %m
}

;; Lane-wise signed i32 maximum (same two-half decomposition).
define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
  binary8to16(m, i32, @llvm.x86.avx2.pmaxs.d, %0, %1)
  ret <16 x i32> %m
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; unsigned int min/max

declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readonly
declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readonly

;; Lane-wise unsigned i32 minimum over the two 8-wide halves.
define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
  binary8to16(m, i32, @llvm.x86.avx2.pminu.d, %0, %1)
  ret <16 x i32> %m
}

;; Lane-wise unsigned i32 maximum over the two 8-wide halves.
define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
  binary8to16(m, i32, @llvm.x86.avx2.pmaxu.d, %0, %1)
  ret <16 x i32> %m
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; float/half conversions
;;
;; The F16C conversion instructions are 8-wide, so the 16-wide varying
;; operations split the input into two halves, convert each, and rejoin.

declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone
; 0 is round nearest even
declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone

;; Convert 16 half-precision values to float, one 8-wide conversion per half.
define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone {
  %r_0 = shufflevector <16 x i16> %v, <16 x i16> undef,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %vr_0 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_0)
  %r_1 = shufflevector <16 x i16> %v, <16 x i16> undef,
           <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %vr_1 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_1)
  %r = shufflevector <8 x float> %vr_0, <8 x float> %vr_1,
         <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                     i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x float> %r
}

;; Convert 16 floats to half precision (round to nearest even), per half.
define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone {
  %r_0 = shufflevector <16 x float> %v, <16 x float> undef,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %vr_0 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_0, i32 0)
  %r_1 = shufflevector <16 x float> %v, <16 x float> undef,
           <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %vr_1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_1, i32 0)
  %r = shufflevector <8 x i16> %vr_0, <8 x i16> %vr_1,
         <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                     i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i16> %r
}

;; Scalar half->float: lane 0 of an 8-wide vector, convert, extract.
define float @__half_to_float_uniform(i16 %v) nounwind readnone {
  %v1 = bitcast i16 %v to <1 x i16>
  %vv = shufflevector <1 x i16> %v1, <1 x i16> undef,
          <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
                     i32 undef, i32 undef, i32 undef, i32 undef>
  %rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv)
  %r = extractelement <8 x float> %rv, i32 0
  ret float %r
}

;; Scalar float->half, same widen/convert/extract pattern as above.
define i16 @__float_to_half_uniform(float %v) nounwind readnone {
  %v1 = bitcast float %v to <1 x float>
  %vv = shufflevector <1 x float> %v1, <1 x float> undef,
          <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
                     i32 undef, i32 undef, i32 undef, i32 undef>
  ; round to nearest even
  %rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0)
  %r = extractelement <8 x i16> %rv, i32 0
  ret i16 %r
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; gather
;;
;; m4 helpers to split 16-wide vectors into the 4- and 8-wide pieces that
;; the AVX2 gather instructions operate on, and to rejoin the results.

declare void @llvm.trap() noreturn nounwind

; extract_4s($1=type, $2=var base name): split a 16-vector %$2 into four
; 4-wide quarters %$2_1 .. %$2_4.
define(`extract_4s', `
  %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %$2_3 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
  %$2_4 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
')

; extract_8s($1=type, $2=var base name): split a 16-vector %$2 into two
; 8-wide halves %$2_1 and %$2_2.
define(`extract_8s', `
  %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef,
          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef,
          <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
')

; assemble_8s($1=element type, $2=ret name, $3=v1, $4=v2): concatenate two
; 8-wide vectors into the 16-wide result %$2.
define(`assemble_8s', `
  %$2 = shufflevector <8 x $1> %$3, <8 x $1> %$4,
          <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                      i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
')

; assemble_4s($1=element type, $2=ret name, $3..$6=v1..v4): concatenate four
; 4-wide vectors into the 16-wide result %$2 (via two 8-wide temporaries).
define(`assemble_4s', `
  %$2_1 = shufflevector <4 x $1> %$3, <4 x $1> %$4,
          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %$2_2 = shufflevector <4 x $1> %$5, <4 x $1> %$6,
          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  assemble_8s($1, $2, $2_1, $2_2)
')

;; AVX2 gathers only handle 32/64-bit elements; use the generic gen_gather()
;; m4 macro (defined elsewhere in the shared builtins) for 8/16-bit types.
gen_gather(i8)
gen_gather(i16)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; int32 gathers
;;
;; 16-wide gathers are built from the 8-wide (32-bit index) or 4-wide
;; (64-bit index) hardware gathers; lanes whose mask sign bit is clear are
;; not loaded, so inactive program instances never touch memory.

declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %target, i8 * %ptr,
                      <8 x i32> %indices, <8 x i32> %mask, i8 %scale) readonly nounwind
declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %target, i8 * %ptr,
                      <4 x i64> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind

;; Gather 16 x i32 from %ptr + %offsets * %scale (32-bit offsets): two
;; 8-wide hardware gathers.
define <16 x i32> @__gather_base_offsets32_i32(i8 * %ptr, i32 %scale, <16 x i32> %offsets,
                             <16 x i32> %vecmask) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  extract_8s(i32, offsets)
  extract_8s(i32, vecmask)

  %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr,
                      <8 x i32> %offsets_1, <8 x i32> %vecmask_1, i8 %scale8)
  %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr,
                      <8 x i32> %offsets_2, <8 x i32> %vecmask_2, i8 %scale8)

  assemble_8s(i32, v, v1, v2)
  ret <16 x i32> %v
}

;; Gather 16 x i32 from %ptr + %offsets * %scale (64-bit offsets): four
;; 4-wide hardware gathers.
define <16 x i32> @__gather_base_offsets64_i32(i8 * %ptr,
                             i32 %scale, <16 x i64> %offsets,
                             <16 x i32> %vecmask) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  extract_4s(i32, vecmask)
  extract_4s(i64, offsets)

  %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
                      <4 x i64> %offsets_1, <4 x i32> %vecmask_1, i8 %scale8)
  %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
                      <4 x i64> %offsets_2, <4 x i32> %vecmask_2, i8 %scale8)
  %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
                      <4 x i64> %offsets_3, <4 x i32> %vecmask_3, i8 %scale8)
  %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
                      <4 x i64> %offsets_4, <4 x i32> %vecmask_4, i8 %scale8)

  assemble_4s(i32, v, v1, v2, v3, v4)
  ret <16 x i32> %v
}

;; Gather 16 x i32 through 32-bit absolute pointers (null base, scale 1).
define <16 x i32> @__gather32_i32(<16 x i32> %ptrs,
                                  <16 x i32> %vecmask) nounwind readonly alwaysinline {
  extract_8s(i32, ptrs)
  extract_8s(i32, vecmask)

  %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null,
                      <8 x i32> %ptrs_1, <8 x i32> %vecmask_1, i8 1)
  %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null,
                      <8 x i32> %ptrs_2, <8 x i32> %vecmask_2, i8 1)

  assemble_8s(i32, v, v1, v2)
  ret <16 x i32> %v
}

;; Gather 16 x i32 through 64-bit absolute pointers (null base, scale 1).
define <16 x i32> @__gather64_i32(<16 x i64> %ptrs,
                                  <16 x i32> %vecmask) nounwind readonly alwaysinline {
  extract_4s(i64, ptrs)
  extract_4s(i32, vecmask)

  %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
                      <4 x i64> %ptrs_1, <4 x i32> %vecmask_1, i8 1)
  %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
                      <4 x i64> %ptrs_2, <4 x i32> %vecmask_2, i8 1)
  %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
                      <4 x i64> %ptrs_3, <4 x i32> %vecmask_3, i8 1)
  %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
                      <4 x i64> %ptrs_4, <4 x i32> %vecmask_4, i8 1)

  assemble_4s(i32, v, v1, v2, v3, v4)
  ret <16 x i32> %v
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; float gathers
;;
;; Same split/gather/rejoin structure as the i32 gathers; the i32 execution
;; mask is bitcast to float because the float-gather intrinsics take a
;; float-typed mask (only the per-lane sign bit matters).

declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %target, i8 * %ptr,
                      <8 x i32> %indices, <8 x float> %mask, i8 %scale8) readonly nounwind
declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %target, i8 * %ptr,
                      <4 x i64> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind

;; Gather 16 x float from %ptr + %offsets * %scale (32-bit offsets).
define <16 x float> @__gather_base_offsets32_float(i8 * %ptr,
                             i32 %scale, <16 x i32> %offsets,
                             <16 x i32> %vecmask) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %mask = bitcast <16 x i32> %vecmask to <16 x float>
  extract_8s(i32, offsets)
  extract_8s(float, mask)

  %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr,
                      <8 x i32> %offsets_1, <8 x float> %mask_1, i8 %scale8)
  %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr,
                      <8 x i32> %offsets_2, <8 x float> %mask_2, i8 %scale8)

  assemble_8s(float, v, v1, v2)
  ret <16 x float> %v
}

;; Gather 16 x float from %ptr + %offsets * %scale (64-bit offsets).
define <16 x float> @__gather_base_offsets64_float(i8 * %ptr,
                             i32 %scale, <16 x i64> %offsets,
                             <16 x i32> %vecmask) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %mask = bitcast <16 x i32> %vecmask to <16 x float>
  extract_4s(i64, offsets)
  extract_4s(float, mask)

  %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
                      <4 x i64> %offsets_1, <4 x float> %mask_1, i8 %scale8)
  %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
                      <4 x i64> %offsets_2, <4 x float> %mask_2, i8 %scale8)
  %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
                      <4 x i64> %offsets_3, <4 x float> %mask_3, i8 %scale8)
  %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
                      <4 x i64> %offsets_4, <4 x float> %mask_4, i8 %scale8)

  assemble_4s(float, v, v1, v2, v3, v4)
  ret <16 x float> %v
}

;; Gather 16 x float through 32-bit absolute pointers (null base, scale 1).
define <16 x float> @__gather32_float(<16 x i32> %ptrs,
                                      <16 x i32> %vecmask) nounwind readonly alwaysinline {
  %mask = bitcast <16 x i32> %vecmask to <16 x float>
  extract_8s(float, mask)
  extract_8s(i32, ptrs)

  %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null,
                      <8 x i32> %ptrs_1, <8 x float> %mask_1, i8 1)
  %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null,
                      <8 x i32> %ptrs_2, <8 x float> %mask_2, i8 1)

  assemble_8s(float, v, v1, v2)
  ret <16 x float> %v
}

;; Gather 16 x float through 64-bit absolute pointers (null base, scale 1).
define <16 x float> @__gather64_float(<16 x i64> %ptrs,
                                      <16 x i32> %vecmask) nounwind readonly alwaysinline {
  %mask = bitcast <16 x i32> %vecmask to <16 x float>
  extract_4s(i64, ptrs)
  extract_4s(float, mask)

  %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
                      <4 x i64> %ptrs_1, <4 x float> %mask_1, i8 1)
  %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
                      <4 x i64> %ptrs_2, <4 x float> %mask_2, i8 1)
  %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
                      <4 x i64> %ptrs_3, <4 x float> %mask_3, i8 1)
  %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
                      <4 x i64> %ptrs_4, <4 x float> %mask_4, i8 1)

  assemble_4s(float, v, v1, v2, v3, v4)
  ret <16 x float> %v
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; int64 gathers
;;
;; The 64-bit-element gathers need a 64-bit per-lane mask, so this target's
;; 32-bit execution mask is sign-extended (preserving the sign bit each
;; hardware gather tests) and split into four 4-wide quarters.

declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %target, i8 * %ptr,
                      <4 x i32> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind
declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %target, i8 * %ptr,
                      <4 x i64> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind

;; Gather 16 x i64 from %ptr + %offsets * %scale (32-bit offsets).
define <16 x i64> @__gather_base_offsets32_i64(i8 * %ptr,
                             i32 %scale, <16 x i32> %offsets,
                             <16 x i32> %mask32) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask = sext <16 x i32> %mask32 to <16 x i64>
  extract_4s(i32, offsets)
  extract_4s(i64, vecmask)

  %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
                      <4 x i32> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8)
  %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
                      <4 x i32> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8)
  %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
                      <4 x i32> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8)
  %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
                      <4 x i32> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8)

  assemble_4s(i64, v, v1, v2, v3, v4)
  ret <16 x i64> %v
}

;; Gather 16 x i64 from %ptr + %offsets * %scale (64-bit offsets).
define <16 x i64> @__gather_base_offsets64_i64(i8 * %ptr,
                             i32 %scale, <16 x i64> %offsets,
                             <16 x i32> %mask32) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask = sext <16 x i32> %mask32 to <16 x i64>
  extract_4s(i64, offsets)
  extract_4s(i64, vecmask)

  %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
                      <4 x i64> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8)
  %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
                      <4 x i64> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8)
  %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
                      <4 x i64> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8)
  %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
                      <4 x i64> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8)

  assemble_4s(i64, v, v1, v2, v3, v4)
  ret <16 x i64> %v
}

;; Gather 16 x i64 through 32-bit absolute pointers (null base, scale 1).
define <16 x i64> @__gather32_i64(<16 x i32> %ptrs,
                                  <16 x i32> %mask32) nounwind readonly alwaysinline {
  %vecmask = sext <16 x i32> %mask32 to <16 x i64>
  extract_4s(i32, ptrs)
  extract_4s(i64, vecmask)

  %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
                      <4 x i32> %ptrs_1, <4 x i64> %vecmask_1, i8 1)
  %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
                      <4 x i32> %ptrs_2, <4 x i64> %vecmask_2, i8 1)
  %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
                      <4 x i32> %ptrs_3, <4 x i64> %vecmask_3, i8 1)
  %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
                      <4 x i32> %ptrs_4, <4 x i64> %vecmask_4, i8 1)

  assemble_4s(i64, v, v1, v2, v3, v4)
  ret <16 x i64> %v
}

;; Gather 16 x i64 through 64-bit absolute pointers (null base, scale 1).
define <16 x i64> @__gather64_i64(<16 x i64> %ptrs,
                                  <16 x i32> %mask32) nounwind readonly alwaysinline {
  %vecmask = sext <16 x i32> %mask32 to <16 x i64>
  extract_4s(i64, ptrs)
  extract_4s(i64, vecmask)

  %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
                      <4 x i64> %ptrs_1, <4 x i64> %vecmask_1, i8 1)
  %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
                      <4 x i64> %ptrs_2, <4 x i64> %vecmask_2, i8 1)
  %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
                      <4 x i64> %ptrs_3, <4 x i64> %vecmask_3, i8 1)
  %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
                      <4 x i64> %ptrs_4, <4 x i64> %vecmask_4, i8 1)

  assemble_4s(i64, v, v1, v2, v3, v4)
  ret <16 x i64> %v
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; double gathers

declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %target, i8 * %ptr,
                     <4 x i64> %indices, <4 x double> %mask, i8 %scale) readonly nounwind
declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %target, i8 * %ptr,
                     <4 x i32> %indices, <4 x double> %mask, i8 %scale) readonly nounwind

;; 16-wide double gather: base pointer + 32-bit offsets.
;; The i32 mask is sign-extended to i64 lanes and bitcast to double, since
;; the pd gather intrinsics take a floating-point mask operand.
define <16 x double> @__gather_base_offsets32_double(i8 * %ptr,
                             i32 %scale, <16 x i32> %offsets,
                             <16 x i32> %mask32) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask64 = sext <16 x i32> %mask32 to <16 x i64>
  %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double>
  extract_4s(i32, offsets)
  extract_4s(double, vecmask)

  %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
                       <4 x i32> %offsets_1, <4 x double> %vecmask_1, i8 %scale8)
  %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
                       <4 x i32> %offsets_2, <4 x double> %vecmask_2, i8 %scale8)
  %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
                       <4 x i32> %offsets_3, <4 x double> %vecmask_3, i8 %scale8)
  %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
                       <4 x i32> %offsets_4, <4 x double> %vecmask_4, i8 %scale8)

  assemble_4s(double, v, v1, v2, v3, v4)

  ret <16 x double> %v
}

;; 16-wide double gather: base pointer + 64-bit offsets (vpgatherqpd).
define <16 x double> @__gather_base_offsets64_double(i8 * %ptr,
                             i32 %scale, <16 x i64> %offsets,
                             <16 x i32> %mask32) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask64 = sext <16 x i32> %mask32 to <16 x i64>
  %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double>
  extract_4s(i64, offsets)
  extract_4s(double, vecmask)

  %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
                       <4 x i64> %offsets_1, <4 x double> %vecmask_1, i8 %scale8)
  %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
                       <4 x i64> %offsets_2, <4 x double> %vecmask_2, i8 %scale8)
  %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
                       <4 x i64> %offsets_3, <4 x double> %vecmask_3, i8 %scale8)
  %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
                       <4 x i64> %offsets_4, <4 x double> %vecmask_4, i8 %scale8)

  assemble_4s(double, v, v1, v2, v3, v4)

  ret <16 x double> %v
}

;; Gather doubles through full 32-bit pointers (base = null, scale = 1).
define <16 x double> @__gather32_double(<16 x i32> %ptrs,
                                        <16 x i32> %mask32) nounwind readonly alwaysinline {
  %vecmask64 = sext <16 x i32> %mask32 to <16 x i64>
  %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double>
  extract_4s(i32, ptrs)
  extract_4s(double, vecmask)

  %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
                       <4 x i32> %ptrs_1, <4 x double> %vecmask_1, i8 1)
  %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
                       <4 x i32> %ptrs_2, <4 x double> %vecmask_2, i8 1)
  %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
                       <4 x i32> %ptrs_3, <4 x double> %vecmask_3, i8 1)
  %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
                       <4 x i32> %ptrs_4, <4 x double> %vecmask_4, i8 1)

  assemble_4s(double, v, v1, v2, v3, v4)

  ret <16 x double> %v
}

;; Gather doubles through full 64-bit pointers (base = null, scale = 1).
define <16 x double> @__gather64_double(<16 x i64> %ptrs,
                                        <16 x i32> %mask32) nounwind readonly alwaysinline {
  %vecmask64 = sext <16 x i32> %mask32 to <16 x i64>
  %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double>
  extract_4s(i64, ptrs)
  extract_4s(double, vecmask)

  %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
                       <4 x i64> %ptrs_1, <4 x double> %vecmask_1, i8 1)
  %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
                       <4 x i64> %ptrs_2, <4 x double> %vecmask_2, i8 1)
  %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
                       <4 x i64> %ptrs_3, <4 x double> %vecmask_3, i8 1)
  %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
                       <4 x i64> %ptrs_4, <4 x double> %vecmask_4, i8 1)

  assemble_4s(double, v, v1, v2, v3, v4)

  ret <16 x double> %v
}
|
||||
409
builtins/target-avx2.ll
Normal file
409
builtins/target-avx2.ll
Normal file
@@ -0,0 +1,409 @@
|
||||
;; Copyright (c) 2010-2013, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
define(`HAVE_GATHER', `1')

include(`target-avx.ll')

rdrand_definition()
saturation_arithmetic()

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; int min/max

declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readonly
declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readonly

;; signed per-lane minimum via AVX2 vpminsd
define <8 x i32> @__min_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
  %m = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %0, <8 x i32> %1)
  ret <8 x i32> %m
}

;; signed per-lane maximum via AVX2 vpmaxsd
define <8 x i32> @__max_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
  %m = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %0, <8 x i32> %1)
  ret <8 x i32> %m
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; unsigned int min/max

declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readonly
declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readonly

;; unsigned per-lane minimum via AVX2 vpminud
define <8 x i32> @__min_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
  %m = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %0, <8 x i32> %1)
  ret <8 x i32> %m
}

;; unsigned per-lane maximum via AVX2 vpmaxud
define <8 x i32> @__max_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
  %m = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %0, <8 x i32> %1)
  ret <8 x i32> %m
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; float/half conversions

declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone
; 0 is round nearest even
declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone

;; half -> float, 8 lanes at a time (F16C vcvtph2ps)
define <8 x float> @__half_to_float_varying(<8 x i16> %v) nounwind readnone {
  %r = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %v)
  ret <8 x float> %r
}

;; float -> half, 8 lanes, rounding to nearest even (mode 0)
define <8 x i16> @__float_to_half_varying(<8 x float> %v) nounwind readnone {
  %r = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %v, i32 0)
  ret <8 x i16> %r
}

;; Scalar half -> float: broadcast the value into lane 0 of an 8-wide
;; vector, convert, and extract lane 0 (no scalar form of the intrinsic).
define float @__half_to_float_uniform(i16 %v) nounwind readnone {
  %v1 = bitcast i16 %v to <1 x i16>
  %vv = shufflevector <1 x i16> %v1, <1 x i16> undef,
           <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
                      i32 undef, i32 undef, i32 undef, i32 undef>
  %rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv)
  %r = extractelement <8 x float> %rv, i32 0
  ret float %r
}

;; Scalar float -> half, same lane-0 trick as above.
define i16 @__float_to_half_uniform(float %v) nounwind readnone {
  %v1 = bitcast float %v to <1 x float>
  %vv = shufflevector <1 x float> %v1, <1 x float> undef,
           <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
                      i32 undef, i32 undef, i32 undef, i32 undef>
  ; round to nearest even
  %rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0)
  %r = extractelement <8 x i16> %rv, i32 0
  ret i16 %r
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; gather

declare void @llvm.trap() noreturn nounwind

;; m4 helper: split an 8-wide vector %$2 of element type $1 into two
;; 4-wide halves named %$2_1 (lanes 0-3) and %$2_2 (lanes 4-7).
define(`extract_4s', `
  %$2_1 = shufflevector <8 x $1> %$2, <8 x $1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %$2_2 = shufflevector <8 x $1> %$2, <8 x $1> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
')

gen_gather(i8)
gen_gather(i16)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; int32 gathers

declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %target, i8 * %ptr,
                  <8 x i32> %indices, <8 x i32> %mask, i8 %scale) readonly nounwind
declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %target, i8 * %ptr,
                  <4 x i64> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind

;; i32 gather, base pointer + 32-bit offsets: maps directly onto one
;; 8-wide vpgatherdd.
define <8 x i32> @__gather_base_offsets32_i32(i8 * %ptr,
                             i32 %scale, <8 x i32> %offsets,
                             <8 x i32> %vecmask) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8

  %v = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr,
                      <8 x i32> %offsets, <8 x i32> %vecmask, i8 %scale8)

  ret <8 x i32> %v
}

;; i32 gather, base pointer + 64-bit offsets: vpgatherqd only handles
;; 4 lanes per instruction, so do two gathers and recombine.
define <8 x i32> @__gather_base_offsets64_i32(i8 * %ptr,
                             i32 %scale, <8 x i64> %offsets,
                             <8 x i32> %vecmask) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  extract_4s(i32, vecmask)
  extract_4s(i64, offsets)

  %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
                       <4 x i64> %offsets_1, <4 x i32> %vecmask_1, i8 %scale8)
  %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
                       <4 x i64> %offsets_2, <4 x i32> %vecmask_2, i8 %scale8)

  %v = shufflevector <4 x i32> %v1, <4 x i32> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %v
}

;; Gather i32 through full 32-bit pointers (base = null, scale = 1).
define <8 x i32> @__gather32_i32(<8 x i32> %ptrs,
                                 <8 x i32> %vecmask) nounwind readonly alwaysinline {
  %v = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null,
                      <8 x i32> %ptrs, <8 x i32> %vecmask, i8 1)
  ret <8 x i32> %v
}

;; Gather i32 through full 64-bit pointers (base = null, scale = 1).
define <8 x i32> @__gather64_i32(<8 x i64> %ptrs,
                                 <8 x i32> %vecmask) nounwind readonly alwaysinline {
  extract_4s(i64, ptrs)
  extract_4s(i32, vecmask)

  %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
                       <4 x i64> %ptrs_1, <4 x i32> %vecmask_1, i8 1)
  %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
                       <4 x i64> %ptrs_2, <4 x i32> %vecmask_2, i8 1)

  %v = shufflevector <4 x i32> %v1, <4 x i32> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %v
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; float gathers

declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %target, i8 * %ptr,
                    <8 x i32> %indices, <8 x float> %mask, i8 %scale8) readonly nounwind
declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %target, i8 * %ptr,
                    <4 x i64> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind

;; float gather, base pointer + 32-bit offsets: single 8-wide vgatherdps.
;; The integer lane mask is bitcast to float for the ps intrinsic.
define <8 x float> @__gather_base_offsets32_float(i8 * %ptr,
                             i32 %scale, <8 x i32> %offsets,
                             <8 x i32> %vecmask) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %mask = bitcast <8 x i32> %vecmask to <8 x float>

  %v = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr,
                        <8 x i32> %offsets, <8 x float> %mask, i8 %scale8)

  ret <8 x float> %v
}

;; float gather, base pointer + 64-bit offsets: two 4-wide vgatherqps,
;; results recombined into an 8-wide vector.
define <8 x float> @__gather_base_offsets64_float(i8 * %ptr,
                             i32 %scale, <8 x i64> %offsets,
                             <8 x i32> %vecmask) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %mask = bitcast <8 x i32> %vecmask to <8 x float>
  extract_4s(i64, offsets)
  extract_4s(float, mask)

  %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
                         <4 x i64> %offsets_1, <4 x float> %mask_1, i8 %scale8)
  %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
                         <4 x i64> %offsets_2, <4 x float> %mask_2, i8 %scale8)

  %v = shufflevector <4 x float> %v1, <4 x float> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %v
}

;; Gather floats through full 32-bit pointers (base = null, scale = 1).
define <8 x float> @__gather32_float(<8 x i32> %ptrs,
                                     <8 x i32> %vecmask) nounwind readonly alwaysinline {
  %mask = bitcast <8 x i32> %vecmask to <8 x float>

  %v = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null,
                        <8 x i32> %ptrs, <8 x float> %mask, i8 1)

  ret <8 x float> %v
}

;; Gather floats through full 64-bit pointers (base = null, scale = 1).
define <8 x float> @__gather64_float(<8 x i64> %ptrs,
                                     <8 x i32> %vecmask) nounwind readonly alwaysinline {
  %mask = bitcast <8 x i32> %vecmask to <8 x float>
  extract_4s(i64, ptrs)
  extract_4s(float, mask)

  %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
                         <4 x i64> %ptrs_1, <4 x float> %mask_1, i8 1)
  %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
                         <4 x i64> %ptrs_2, <4 x float> %mask_2, i8 1)

  %v = shufflevector <4 x float> %v1, <4 x float> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %v
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; int64 gathers

declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %target, i8 * %ptr,
                  <4 x i32> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind
declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %target, i8 * %ptr,
                  <4 x i64> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind

;; i64 gather, base pointer + 32-bit offsets: the q gathers handle only
;; 4 lanes each, so split, gather twice (vpgatherdq), and recombine.
define <8 x i64> @__gather_base_offsets32_i64(i8 * %ptr,
                             i32 %scale, <8 x i32> %offsets,
                             <8 x i32> %mask32) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask = sext <8 x i32> %mask32 to <8 x i64>
  extract_4s(i32, offsets)
  extract_4s(i64, vecmask)

  %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
                       <4 x i32> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8)
  %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
                       <4 x i32> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8)

  %v = shufflevector <4 x i64> %v1, <4 x i64> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i64> %v
}

;; i64 gather, base pointer + 64-bit offsets (vpgatherqq).
define <8 x i64> @__gather_base_offsets64_i64(i8 * %ptr,
                             i32 %scale, <8 x i64> %offsets,
                             <8 x i32> %mask32) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask = sext <8 x i32> %mask32 to <8 x i64>
  extract_4s(i64, offsets)
  extract_4s(i64, vecmask)

  %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
                       <4 x i64> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8)
  %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
                       <4 x i64> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8)

  %v = shufflevector <4 x i64> %v1, <4 x i64> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i64> %v
}

;; Gather i64 through full 32-bit pointers (base = null, scale = 1).
define <8 x i64> @__gather32_i64(<8 x i32> %ptrs,
                                 <8 x i32> %mask32) nounwind readonly alwaysinline {
  %vecmask = sext <8 x i32> %mask32 to <8 x i64>

  extract_4s(i32, ptrs)
  extract_4s(i64, vecmask)

  %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
                       <4 x i32> %ptrs_1, <4 x i64> %vecmask_1, i8 1)
  %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
                       <4 x i32> %ptrs_2, <4 x i64> %vecmask_2, i8 1)
  %v = shufflevector <4 x i64> %v1, <4 x i64> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i64> %v
}

;; Gather i64 through full 64-bit pointers (base = null, scale = 1).
define <8 x i64> @__gather64_i64(<8 x i64> %ptrs,
                                 <8 x i32> %mask32) nounwind readonly alwaysinline {
  %vecmask = sext <8 x i32> %mask32 to <8 x i64>
  extract_4s(i64, ptrs)
  extract_4s(i64, vecmask)

  %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
                       <4 x i64> %ptrs_1, <4 x i64> %vecmask_1, i8 1)
  %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
                       <4 x i64> %ptrs_2, <4 x i64> %vecmask_2, i8 1)

  %v = shufflevector <4 x i64> %v1, <4 x i64> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i64> %v
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; double gathers

declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %target, i8 * %ptr,
                     <4 x i64> %indices, <4 x double> %mask, i8 %scale) readonly nounwind
declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %target, i8 * %ptr,
                     <4 x i32> %indices, <4 x double> %mask, i8 %scale) readonly nounwind

;; double gather, base pointer + 32-bit offsets. The i32 mask is widened
;; to i64 lanes and bitcast to double for the pd gather intrinsics;
;; two 4-wide vgatherdpd results are recombined into 8 lanes.
define <8 x double> @__gather_base_offsets32_double(i8 * %ptr,
                             i32 %scale, <8 x i32> %offsets,
                             <8 x i32> %mask32) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask64 = sext <8 x i32> %mask32 to <8 x i64>
  %vecmask = bitcast <8 x i64> %vecmask64 to <8 x double>
  extract_4s(i32, offsets)
  extract_4s(double, vecmask)

  %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
                          <4 x i32> %offsets_1, <4 x double> %vecmask_1, i8 %scale8)
  %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
                          <4 x i32> %offsets_2, <4 x double> %vecmask_2, i8 %scale8)

  %v = shufflevector <4 x double> %v1, <4 x double> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x double> %v
}

;; double gather, base pointer + 64-bit offsets (vgatherqpd).
define <8 x double> @__gather_base_offsets64_double(i8 * %ptr,
                             i32 %scale, <8 x i64> %offsets,
                             <8 x i32> %mask32) nounwind readonly alwaysinline {
  %scale8 = trunc i32 %scale to i8
  %vecmask64 = sext <8 x i32> %mask32 to <8 x i64>
  %vecmask = bitcast <8 x i64> %vecmask64 to <8 x double>
  extract_4s(i64, offsets)
  extract_4s(double, vecmask)

  %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
                          <4 x i64> %offsets_1, <4 x double> %vecmask_1, i8 %scale8)
  %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
                          <4 x i64> %offsets_2, <4 x double> %vecmask_2, i8 %scale8)

  %v = shufflevector <4 x double> %v1, <4 x double> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x double> %v
}

;; Gather doubles through full 32-bit pointers (base = null, scale = 1).
define <8 x double> @__gather32_double(<8 x i32> %ptrs,
                                       <8 x i32> %mask32) nounwind readonly alwaysinline {
  %vecmask64 = sext <8 x i32> %mask32 to <8 x i64>
  %vecmask = bitcast <8 x i64> %vecmask64 to <8 x double>
  extract_4s(i32, ptrs)
  extract_4s(double, vecmask)

  %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
                          <4 x i32> %ptrs_1, <4 x double> %vecmask_1, i8 1)
  %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
                          <4 x i32> %ptrs_2, <4 x double> %vecmask_2, i8 1)

  %v = shufflevector <4 x double> %v1, <4 x double> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x double> %v
}

;; Gather doubles through full 64-bit pointers (base = null, scale = 1).
define <8 x double> @__gather64_double(<8 x i64> %ptrs,
                                       <8 x i32> %mask32) nounwind readonly alwaysinline {
  %vecmask64 = sext <8 x i32> %mask32 to <8 x i64>
  %vecmask = bitcast <8 x i64> %vecmask64 to <8 x double>
  extract_4s(i64, ptrs)
  extract_4s(double, vecmask)

  %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
                          <4 x i64> %ptrs_1, <4 x double> %vecmask_1, i8 1)
  %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
                          <4 x i64> %ptrs_2, <4 x double> %vecmask_2, i8 1)

  %v = shufflevector <4 x double> %v1, <4 x double> %v2,
           <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>

  ret <8 x double> %v
}
|
||||
1310
builtins/target-avx512-common.ll
Normal file
1310
builtins/target-avx512-common.ll
Normal file
File diff suppressed because it is too large
Load Diff
1036
builtins/target-generic-1.ll
Normal file
1036
builtins/target-generic-1.ll
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2014, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -31,4 +31,4 @@
|
||||
|
||||
define(`WIDTH',`16')
|
||||
include(`target-generic-common.ll')
|
||||
|
||||
saturation_arithmetic_novec()
|
||||
|
||||
34
builtins/target-generic-32.ll
Normal file
34
builtins/target-generic-32.ll
Normal file
@@ -0,0 +1,34 @@
|
||||
;; Copyright (c) 2010-2014, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
;; 32-wide generic target: set the program-count width, then pull in the
;; shared generic definitions.
define(`WIDTH',`32')
include(`target-generic-common.ll')
saturation_arithmetic_novec()
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2014, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -31,4 +31,4 @@
|
||||
|
||||
define(`WIDTH',`4')
|
||||
include(`target-generic-common.ll')
|
||||
|
||||
saturation_arithmetic_novec()
|
||||
|
||||
34
builtins/target-generic-64.ll
Normal file
34
builtins/target-generic-64.ll
Normal file
@@ -0,0 +1,34 @@
|
||||
;; Copyright (c) 2010-2014, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
;; 64-wide generic target: set the program-count width, then pull in the
;; shared generic definitions.
define(`WIDTH',`64')
include(`target-generic-common.ll')
saturation_arithmetic_novec()
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2014, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -31,4 +31,4 @@
|
||||
|
||||
define(`WIDTH',`8')
|
||||
include(`target-generic-common.ll')
|
||||
|
||||
saturation_arithmetic_novec()
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -29,12 +29,18 @@
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128-v16:16:16-v32:32:32-v4:128:128";
|
||||
|
||||
define(`MASK',`i1')
|
||||
define(`HAVE_GATHER',`1')
|
||||
define(`HAVE_SCATTER',`1')
|
||||
|
||||
include(`util.m4')
|
||||
|
||||
stdlib_core()
|
||||
scans()
|
||||
reduce_equal(WIDTH)
|
||||
rdrand_decls()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; broadcast/rotate/shuffle
|
||||
@@ -46,6 +52,20 @@ declare <WIDTH x i16> @__smear_i16(i16) nounwind readnone
|
||||
declare <WIDTH x i32> @__smear_i32(i32) nounwind readnone
|
||||
declare <WIDTH x i64> @__smear_i64(i64) nounwind readnone
|
||||
|
||||
declare <WIDTH x float> @__setzero_float() nounwind readnone
|
||||
declare <WIDTH x double> @__setzero_double() nounwind readnone
|
||||
declare <WIDTH x i8> @__setzero_i8() nounwind readnone
|
||||
declare <WIDTH x i16> @__setzero_i16() nounwind readnone
|
||||
declare <WIDTH x i32> @__setzero_i32() nounwind readnone
|
||||
declare <WIDTH x i64> @__setzero_i64() nounwind readnone
|
||||
|
||||
declare <WIDTH x float> @__undef_float() nounwind readnone
|
||||
declare <WIDTH x double> @__undef_double() nounwind readnone
|
||||
declare <WIDTH x i8> @__undef_i8() nounwind readnone
|
||||
declare <WIDTH x i16> @__undef_i16() nounwind readnone
|
||||
declare <WIDTH x i32> @__undef_i32() nounwind readnone
|
||||
declare <WIDTH x i64> @__undef_i64() nounwind readnone
|
||||
|
||||
declare <WIDTH x float> @__broadcast_float(<WIDTH x float>, i32) nounwind readnone
|
||||
declare <WIDTH x double> @__broadcast_double(<WIDTH x double>, i32) nounwind readnone
|
||||
declare <WIDTH x i8> @__broadcast_i8(<WIDTH x i8>, i32) nounwind readnone
|
||||
@@ -60,6 +80,13 @@ declare <WIDTH x i32> @__rotate_i32(<WIDTH x i32>, i32) nounwind readnone
|
||||
declare <WIDTH x double> @__rotate_double(<WIDTH x double>, i32) nounwind readnone
|
||||
declare <WIDTH x i64> @__rotate_i64(<WIDTH x i64>, i32) nounwind readnone
|
||||
|
||||
declare <WIDTH x i8> @__shift_i8(<WIDTH x i8>, i32) nounwind readnone
|
||||
declare <WIDTH x i16> @__shift_i16(<WIDTH x i16>, i32) nounwind readnone
|
||||
declare <WIDTH x float> @__shift_float(<WIDTH x float>, i32) nounwind readnone
|
||||
declare <WIDTH x i32> @__shift_i32(<WIDTH x i32>, i32) nounwind readnone
|
||||
declare <WIDTH x double> @__shift_double(<WIDTH x double>, i32) nounwind readnone
|
||||
declare <WIDTH x i64> @__shift_i64(<WIDTH x i64>, i32) nounwind readnone
|
||||
|
||||
declare <WIDTH x i8> @__shuffle_i8(<WIDTH x i8>, <WIDTH x i32>) nounwind readnone
|
||||
declare <WIDTH x i8> @__shuffle2_i8(<WIDTH x i8>, <WIDTH x i8>,
|
||||
<WIDTH x i32>) nounwind readnone
|
||||
@@ -98,6 +125,14 @@ declare void @__aos_to_soa4_float(float * noalias %p, <WIDTH x float> * noalias
|
||||
<WIDTH x float> * noalias %out2,
|
||||
<WIDTH x float> * noalias %out3) nounwind
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
declare float @__half_to_float_uniform(i16 %v) nounwind readnone
|
||||
declare <WIDTH x float> @__half_to_float_varying(<WIDTH x i16> %v) nounwind readnone
|
||||
declare i16 @__float_to_half_uniform(float %v) nounwind readnone
|
||||
declare <WIDTH x i16> @__float_to_half_varying(<WIDTH x float> %v) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; math
|
||||
|
||||
@@ -159,6 +194,7 @@ declare float @__rcp_uniform_float(float) nounwind readnone
|
||||
declare float @__sqrt_uniform_float(float) nounwind readnone
|
||||
declare <WIDTH x float> @__rcp_varying_float(<WIDTH x float>) nounwind readnone
|
||||
declare <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float>) nounwind readnone
|
||||
|
||||
declare <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone
|
||||
|
||||
declare double @__sqrt_uniform_double(double) nounwind readnone
|
||||
@@ -174,36 +210,34 @@ declare i64 @__count_trailing_zeros_i64(i64) nounwind readnone
|
||||
declare i32 @__count_leading_zeros_i32(i32) nounwind readnone
|
||||
declare i64 @__count_leading_zeros_i64(i64) nounwind readnone
|
||||
|
||||
;; svml
|
||||
|
||||
; FIXME: need either to wire these up to the 8-wide SVML entrypoints,
|
||||
; or, use the macro to call the 4-wide ones twice with our 8-wide
|
||||
; vectors...
|
||||
|
||||
declare <WIDTH x float> @__svml_sin(<WIDTH x float>)
|
||||
declare <WIDTH x float> @__svml_cos(<WIDTH x float>)
|
||||
declare void @__svml_sincos(<WIDTH x float>, <WIDTH x float> *, <WIDTH x float> *)
|
||||
declare <WIDTH x float> @__svml_tan(<WIDTH x float>)
|
||||
declare <WIDTH x float> @__svml_atan(<WIDTH x float>)
|
||||
declare <WIDTH x float> @__svml_atan2(<WIDTH x float>, <WIDTH x float>)
|
||||
declare <WIDTH x float> @__svml_exp(<WIDTH x float>)
|
||||
declare <WIDTH x float> @__svml_log(<WIDTH x float>)
|
||||
declare <WIDTH x float> @__svml_pow(<WIDTH x float>, <WIDTH x float>)
|
||||
;; svml
|
||||
|
||||
include(`svml.m4')
|
||||
svml_stubs(float,f,WIDTH)
|
||||
svml_stubs(double,d,WIDTH)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reductions
|
||||
|
||||
declare i32 @__movmsk(<WIDTH x i1>) nounwind readnone
|
||||
declare i64 @__movmsk(<WIDTH x i1>) nounwind readnone
|
||||
declare i1 @__any(<WIDTH x i1>) nounwind readnone
|
||||
declare i1 @__all(<WIDTH x i1>) nounwind readnone
|
||||
declare i1 @__none(<WIDTH x i1>) nounwind readnone
|
||||
|
||||
declare i16 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone
|
||||
declare i32 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone
|
||||
|
||||
declare float @__reduce_add_float(<WIDTH x float>) nounwind readnone
|
||||
declare float @__reduce_min_float(<WIDTH x float>) nounwind readnone
|
||||
declare float @__reduce_max_float(<WIDTH x float>) nounwind readnone
|
||||
|
||||
declare i32 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone
|
||||
declare i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone
|
||||
declare i32 @__reduce_min_int32(<WIDTH x i32>) nounwind readnone
|
||||
declare i32 @__reduce_max_int32(<WIDTH x i32>) nounwind readnone
|
||||
|
||||
declare i32 @__reduce_add_uint32(<WIDTH x i32>) nounwind readnone
|
||||
declare i32 @__reduce_min_uint32(<WIDTH x i32>) nounwind readnone
|
||||
declare i32 @__reduce_max_uint32(<WIDTH x i32>) nounwind readnone
|
||||
|
||||
@@ -214,82 +248,99 @@ declare double @__reduce_max_double(<WIDTH x double>) nounwind readnone
|
||||
declare i64 @__reduce_add_int64(<WIDTH x i64>) nounwind readnone
|
||||
declare i64 @__reduce_min_int64(<WIDTH x i64>) nounwind readnone
|
||||
declare i64 @__reduce_max_int64(<WIDTH x i64>) nounwind readnone
|
||||
|
||||
declare i64 @__reduce_add_uint64(<WIDTH x i64>) nounwind readnone
|
||||
declare i64 @__reduce_min_uint64(<WIDTH x i64>) nounwind readnone
|
||||
declare i64 @__reduce_max_uint64(<WIDTH x i64>) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
load_and_broadcast(WIDTH, i8, 8)
|
||||
load_and_broadcast(WIDTH, i16, 16)
|
||||
load_and_broadcast(WIDTH, i32, 32)
|
||||
load_and_broadcast(WIDTH, i64, 64)
|
||||
|
||||
declare <WIDTH x i8> @__masked_load_8(i8 * nocapture, <WIDTH x i1> %mask) nounwind readonly
|
||||
declare <WIDTH x i16> @__masked_load_16(i8 * nocapture, <WIDTH x i1> %mask) nounwind readonly
|
||||
declare <WIDTH x i32> @__masked_load_32(i8 * nocapture, <WIDTH x i1> %mask) nounwind readonly
|
||||
declare <WIDTH x i64> @__masked_load_64(i8 * nocapture, <WIDTH x i1> %mask) nounwind readonly
|
||||
declare <WIDTH x i8> @__masked_load_i8(i8 * nocapture, <WIDTH x i1> %mask) nounwind readonly
|
||||
declare <WIDTH x i16> @__masked_load_i16(i8 * nocapture, <WIDTH x i1> %mask) nounwind readonly
|
||||
declare <WIDTH x i32> @__masked_load_i32(i8 * nocapture, <WIDTH x i1> %mask) nounwind readonly
|
||||
declare <WIDTH x float> @__masked_load_float(i8 * nocapture, <WIDTH x i1> %mask) nounwind readonly
|
||||
declare <WIDTH x i64> @__masked_load_i64(i8 * nocapture, <WIDTH x i1> %mask) nounwind readonly
|
||||
declare <WIDTH x double> @__masked_load_double(i8 * nocapture, <WIDTH x i1> %mask) nounwind readonly
|
||||
|
||||
declare void @__masked_store_8(<WIDTH x i8>* nocapture, <WIDTH x i8>,
|
||||
declare void @__masked_store_i8(<WIDTH x i8>* nocapture, <WIDTH x i8>,
|
||||
<WIDTH x i1>) nounwind
|
||||
declare void @__masked_store_16(<WIDTH x i16>* nocapture, <WIDTH x i16>,
|
||||
<WIDTH x i1>) nounwind
|
||||
declare void @__masked_store_32(<WIDTH x i32>* nocapture, <WIDTH x i32>,
|
||||
<WIDTH x i1>) nounwind
|
||||
declare void @__masked_store_64(<WIDTH x i64>* nocapture, <WIDTH x i64>,
|
||||
<WIDTH x i1> %mask) nounwind
|
||||
declare void @__masked_store_i16(<WIDTH x i16>* nocapture, <WIDTH x i16>,
|
||||
<WIDTH x i1>) nounwind
|
||||
declare void @__masked_store_i32(<WIDTH x i32>* nocapture, <WIDTH x i32>,
|
||||
<WIDTH x i1>) nounwind
|
||||
declare void @__masked_store_float(<WIDTH x float>* nocapture, <WIDTH x float>,
|
||||
<WIDTH x i1>) nounwind
|
||||
declare void @__masked_store_i64(<WIDTH x i64>* nocapture, <WIDTH x i64>,
|
||||
<WIDTH x i1> %mask) nounwind
|
||||
declare void @__masked_store_double(<WIDTH x double>* nocapture, <WIDTH x double>,
|
||||
<WIDTH x i1> %mask) nounwind
|
||||
|
||||
define void @__masked_store_blend_8(<WIDTH x i8>* nocapture, <WIDTH x i8>,
|
||||
<WIDTH x i1>) nounwind {
|
||||
%v = load <WIDTH x i8> * %0
|
||||
|
||||
define void @__masked_store_blend_i8(<WIDTH x i8>* nocapture, <WIDTH x i8>,
|
||||
<WIDTH x i1>) nounwind alwaysinline {
|
||||
%v = load PTR_OP_ARGS(`<WIDTH x i8> ') %0
|
||||
%v1 = select <WIDTH x i1> %2, <WIDTH x i8> %1, <WIDTH x i8> %v
|
||||
store <WIDTH x i8> %v1, <WIDTH x i8> * %0
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_16(<WIDTH x i16>* nocapture, <WIDTH x i16>,
|
||||
<WIDTH x i1>) nounwind {
|
||||
%v = load <WIDTH x i16> * %0
|
||||
define void @__masked_store_blend_i16(<WIDTH x i16>* nocapture, <WIDTH x i16>,
|
||||
<WIDTH x i1>) nounwind alwaysinline {
|
||||
%v = load PTR_OP_ARGS(`<WIDTH x i16> ') %0
|
||||
%v1 = select <WIDTH x i1> %2, <WIDTH x i16> %1, <WIDTH x i16> %v
|
||||
store <WIDTH x i16> %v1, <WIDTH x i16> * %0
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_32(<WIDTH x i32>* nocapture, <WIDTH x i32>,
|
||||
<WIDTH x i1>) nounwind {
|
||||
%v = load <WIDTH x i32> * %0
|
||||
define void @__masked_store_blend_i32(<WIDTH x i32>* nocapture, <WIDTH x i32>,
|
||||
<WIDTH x i1>) nounwind alwaysinline {
|
||||
%v = load PTR_OP_ARGS(`<WIDTH x i32> ') %0
|
||||
%v1 = select <WIDTH x i1> %2, <WIDTH x i32> %1, <WIDTH x i32> %v
|
||||
store <WIDTH x i32> %v1, <WIDTH x i32> * %0
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_64(<WIDTH x i64>* nocapture,
|
||||
<WIDTH x i64>, <WIDTH x i1>) nounwind {
|
||||
%v = load <WIDTH x i64> * %0
|
||||
define void @__masked_store_blend_float(<WIDTH x float>* nocapture, <WIDTH x float>,
|
||||
<WIDTH x i1>) nounwind alwaysinline {
|
||||
%v = load PTR_OP_ARGS(`<WIDTH x float> ') %0
|
||||
%v1 = select <WIDTH x i1> %2, <WIDTH x float> %1, <WIDTH x float> %v
|
||||
store <WIDTH x float> %v1, <WIDTH x float> * %0
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_i64(<WIDTH x i64>* nocapture,
|
||||
<WIDTH x i64>, <WIDTH x i1>) nounwind alwaysinline {
|
||||
%v = load PTR_OP_ARGS(`<WIDTH x i64> ') %0
|
||||
%v1 = select <WIDTH x i1> %2, <WIDTH x i64> %1, <WIDTH x i64> %v
|
||||
store <WIDTH x i64> %v1, <WIDTH x i64> * %0
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_double(<WIDTH x double>* nocapture,
|
||||
<WIDTH x double>, <WIDTH x i1>) nounwind alwaysinline {
|
||||
%v = load PTR_OP_ARGS(`<WIDTH x double> ') %0
|
||||
%v1 = select <WIDTH x i1> %2, <WIDTH x double> %1, <WIDTH x double> %v
|
||||
store <WIDTH x double> %v1, <WIDTH x double> * %0
|
||||
ret void
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather/scatter
|
||||
|
||||
define(`gather_scatter', `
|
||||
declare <WIDTH x $1> @__gather_base_offsets32_$1(i8 * nocapture, <WIDTH x i32>,
|
||||
i32, <WIDTH x i1>) nounwind readonly
|
||||
declare <WIDTH x $1> @__gather_base_offsets64_$1(i8 * nocapture, <WIDTH x i64>,
|
||||
i32, <WIDTH x i1>) nounwind readonly
|
||||
declare <WIDTH x $1> @__gather_base_offsets32_$1(i8 * nocapture, i32, <WIDTH x i32>,
|
||||
<WIDTH x i1>) nounwind readonly
|
||||
declare <WIDTH x $1> @__gather_base_offsets64_$1(i8 * nocapture, i32, <WIDTH x i64>,
|
||||
<WIDTH x i1>) nounwind readonly
|
||||
declare <WIDTH x $1> @__gather32_$1(<WIDTH x i32>,
|
||||
<WIDTH x i1>) nounwind readonly
|
||||
declare <WIDTH x $1> @__gather64_$1(<WIDTH x i64>,
|
||||
<WIDTH x i1>) nounwind readonly
|
||||
|
||||
declare void @__scatter_base_offsets32_$1(i8* nocapture, <WIDTH x i32>,
|
||||
i32, <WIDTH x $1>, <WIDTH x i1>) nounwind
|
||||
declare void @__scatter_base_offsets64_$1(i8* nocapture, <WIDTH x i64>,
|
||||
i32, <WIDTH x $1>, <WIDTH x i1>) nounwind
|
||||
declare void @__scatter_base_offsets32_$1(i8* nocapture, i32, <WIDTH x i32>,
|
||||
<WIDTH x $1>, <WIDTH x i1>) nounwind
|
||||
declare void @__scatter_base_offsets64_$1(i8* nocapture, i32, <WIDTH x i64>,
|
||||
<WIDTH x $1>, <WIDTH x i1>) nounwind
|
||||
declare void @__scatter32_$1(<WIDTH x i32>, <WIDTH x $1>,
|
||||
<WIDTH x i1>) nounwind
|
||||
declare void @__scatter64_$1(<WIDTH x i64>, <WIDTH x $1>,
|
||||
@@ -299,12 +350,16 @@ declare void @__scatter64_$1(<WIDTH x i64>, <WIDTH x $1>,
|
||||
gather_scatter(i8)
|
||||
gather_scatter(i16)
|
||||
gather_scatter(i32)
|
||||
gather_scatter(float)
|
||||
gather_scatter(i64)
|
||||
gather_scatter(double)
|
||||
|
||||
declare i32 @__packed_load_active(i32 * nocapture, <WIDTH x i32> * nocapture,
|
||||
<WIDTH x i1>) nounwind
|
||||
declare i32 @__packed_store_active(i32 * nocapture, <WIDTH x i32> %vals,
|
||||
<WIDTH x i1>) nounwind
|
||||
declare i32 @__packed_store_active2(i32 * nocapture, <WIDTH x i32> %vals,
|
||||
<WIDTH x i1>) nounwind
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
@@ -315,3 +370,25 @@ declare void @__prefetch_read_uniform_2(i8 * nocapture) nounwind
|
||||
declare void @__prefetch_read_uniform_3(i8 * nocapture) nounwind
|
||||
declare void @__prefetch_read_uniform_nt(i8 * nocapture) nounwind
|
||||
|
||||
declare void @__prefetch_read_varying_1(<WIDTH x i64> %addr, <WIDTH x MASK> %mask) nounwind
|
||||
declare void @__prefetch_read_varying_1_native(i8 * %base, i32 %scale, <WIDTH x i32> %offsets, <WIDTH x MASK> %mask) nounwind
|
||||
declare void @__prefetch_read_varying_2(<WIDTH x i64> %addr, <WIDTH x MASK> %mask) nounwind
|
||||
declare void @__prefetch_read_varying_2_native(i8 * %base, i32 %scale, <WIDTH x i32> %offsets, <WIDTH x MASK> %mask) nounwind
|
||||
declare void @__prefetch_read_varying_3(<WIDTH x i64> %addr, <WIDTH x MASK> %mask) nounwind
|
||||
declare void @__prefetch_read_varying_3_native(i8 * %base, i32 %scale, <WIDTH x i32> %offsets, <WIDTH x MASK> %mask) nounwind
|
||||
declare void @__prefetch_read_varying_nt(<WIDTH x i64> %addr, <WIDTH x MASK> %mask) nounwind
|
||||
declare void @__prefetch_read_varying_nt_native(i8 * %base, i32 %scale, <WIDTH x i32> %offsets, <WIDTH x MASK> %mask) nounwind
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int8/int16 builtins
|
||||
|
||||
define_avgs()
|
||||
declare_nvptx()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
|
||||
74
builtins/target-knl.ll
Normal file
74
builtins/target-knl.ll
Normal file
@@ -0,0 +1,74 @@
|
||||
;; Copyright (c) 2015-2016, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
define(`WIDTH',`16')
|
||||
|
||||
ifelse(LLVM_VERSION, LLVM_3_7,
|
||||
`include(`target-avx512-common.ll')',
|
||||
LLVM_VERSION, LLVM_3_8,
|
||||
`include(`target-avx512-common.ll')',
|
||||
LLVM_VERSION, LLVM_3_9,
|
||||
`include(`target-avx512-common.ll')',
|
||||
LLVM_VERSION, LLVM_4_0,
|
||||
`include(`target-avx512-common.ll')',
|
||||
LLVM_VERSION, LLVM_5_0,
|
||||
`include(`target-avx512-common.ll')'
|
||||
)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp, rsqrt
|
||||
|
||||
define(`rcp_rsqrt_varying_float_knl',`
|
||||
declare <16 x float> @llvm.x86.avx512.rcp28.ps(<16 x float>, <16 x float>, i16, i32) nounwind readnone
|
||||
define <16 x float> @__rcp_varying_float(<16 x float>) nounwind readonly alwaysinline {
|
||||
%res = call <16 x float> @llvm.x86.avx512.rcp28.ps(<16 x float> %0, <16 x float> undef, i16 -1, i32 8)
|
||||
ret <16 x float> %res
|
||||
}
|
||||
declare <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float>, <16 x float>, i16, i32) nounwind readnone
|
||||
define <16 x float> @__rsqrt_varying_float(<16 x float> %v) nounwind readonly alwaysinline {
|
||||
%res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %v, <16 x float> undef, i16 -1, i32 8)
|
||||
ret <16 x float> %res
|
||||
}
|
||||
')
|
||||
|
||||
ifelse(LLVM_VERSION, LLVM_3_7,
|
||||
rcp_rsqrt_varying_float_knl(),
|
||||
LLVM_VERSION, LLVM_3_8,
|
||||
rcp_rsqrt_varying_float_knl(),
|
||||
LLVM_VERSION, LLVM_3_9,
|
||||
rcp_rsqrt_varying_float_knl(),
|
||||
LLVM_VERSION, LLVM_4_0,
|
||||
rcp_rsqrt_varying_float_knl(),
|
||||
LLVM_VERSION, LLVM_5_0,
|
||||
rcp_rsqrt_varying_float_knl()
|
||||
)
|
||||
|
||||
;;saturation_arithmetic_novec()
|
||||
527
builtins/target-neon-16.ll
Normal file
527
builtins/target-neon-16.ll
Normal file
@@ -0,0 +1,527 @@
|
||||
;;
|
||||
;; target-neon-16.ll
|
||||
;;
|
||||
;; Copyright(c) 2013-2015 Google, Inc.
|
||||
;;
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Matt Pharr nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
define(`WIDTH',`8')
|
||||
define(`MASK',`i16')
|
||||
|
||||
include(`util.m4')
|
||||
include(`target-neon-common.ll')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
define <8 x float> @__half_to_float_varying(<8 x i16> %v) nounwind readnone alwaysinline {
|
||||
unary4to8conv(r, i16, float, @llvm.arm.neon.vcvthf2fp, %v)
|
||||
ret <8 x float> %r
|
||||
}
|
||||
|
||||
define <8 x i16> @__float_to_half_varying(<8 x float> %v) nounwind readnone alwaysinline {
|
||||
unary4to8conv(r, float, i16, @llvm.arm.neon.vcvtfp2hf, %v)
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; math
|
||||
|
||||
;; round/floor/ceil
|
||||
|
||||
;; FIXME: grabbed these from the sse2 target, which does not have native
|
||||
;; instructions for these. Is there a better approach for NEON?
|
||||
|
||||
define <8 x float> @__round_varying_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
%float_to_int_bitcast.i.i.i.i = bitcast <8 x float> %0 to <8 x i32>
|
||||
%bitop.i.i = and <8 x i32> %float_to_int_bitcast.i.i.i.i,
|
||||
<i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648,
|
||||
i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
|
||||
%bitop.i = xor <8 x i32> %float_to_int_bitcast.i.i.i.i, %bitop.i.i
|
||||
%int_to_float_bitcast.i.i40.i = bitcast <8 x i32> %bitop.i to <8 x float>
|
||||
%binop.i = fadd <8 x float> %int_to_float_bitcast.i.i40.i,
|
||||
<float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06,
|
||||
float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>
|
||||
%binop21.i = fadd <8 x float> %binop.i,
|
||||
<float -8.388608e+06, float -8.388608e+06, float -8.388608e+06, float -8.388608e+06,
|
||||
float -8.388608e+06, float -8.388608e+06, float -8.388608e+06, float -8.388608e+06>
|
||||
%float_to_int_bitcast.i.i.i = bitcast <8 x float> %binop21.i to <8 x i32>
|
||||
%bitop31.i = xor <8 x i32> %float_to_int_bitcast.i.i.i, %bitop.i.i
|
||||
%int_to_float_bitcast.i.i.i = bitcast <8 x i32> %bitop31.i to <8 x float>
|
||||
ret <8 x float> %int_to_float_bitcast.i.i.i
|
||||
}
|
||||
|
||||
define <8 x float> @__floor_varying_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
%calltmp.i = tail call <8 x float> @__round_varying_float(<8 x float> %0) nounwind
|
||||
%bincmp.i = fcmp ogt <8 x float> %calltmp.i, %0
|
||||
%val_to_boolvec32.i = sext <8 x i1> %bincmp.i to <8 x i32>
|
||||
%bitop.i = and <8 x i32> %val_to_boolvec32.i,
|
||||
<i32 -1082130432, i32 -1082130432, i32 -1082130432, i32 -1082130432,
|
||||
i32 -1082130432, i32 -1082130432, i32 -1082130432, i32 -1082130432>
|
||||
%int_to_float_bitcast.i.i.i = bitcast <8 x i32> %bitop.i to <8 x float>
|
||||
%binop.i = fadd <8 x float> %calltmp.i, %int_to_float_bitcast.i.i.i
|
||||
ret <8 x float> %binop.i
|
||||
}
|
||||
|
||||
define <8 x float> @__ceil_varying_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
%calltmp.i = tail call <8 x float> @__round_varying_float(<8 x float> %0) nounwind
|
||||
%bincmp.i = fcmp olt <8 x float> %calltmp.i, %0
|
||||
%val_to_boolvec32.i = sext <8 x i1> %bincmp.i to <8 x i32>
|
||||
%bitop.i = and <8 x i32> %val_to_boolvec32.i,
|
||||
<i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216,
|
||||
i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216>
|
||||
%int_to_float_bitcast.i.i.i = bitcast <8 x i32> %bitop.i to <8 x float>
|
||||
%binop.i = fadd <8 x float> %calltmp.i, %int_to_float_bitcast.i.i.i
|
||||
ret <8 x float> %binop.i
|
||||
}
|
||||
|
||||
;; FIXME: rounding doubles and double vectors needs to be implemented
|
||||
declare <WIDTH x double> @__round_varying_double(<WIDTH x double>) nounwind readnone
|
||||
declare <WIDTH x double> @__floor_varying_double(<WIDTH x double>) nounwind readnone
|
||||
declare <WIDTH x double> @__ceil_varying_double(<WIDTH x double>) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; min/max
|
||||
|
||||
declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define <WIDTH x float> @__max_varying_float(<WIDTH x float>,
|
||||
<WIDTH x float>) nounwind readnone alwaysinline {
|
||||
binary4to8(r, float, @llvm.arm.neon.vmaxs.v4f32, %0, %1)
|
||||
ret <WIDTH x float> %r
|
||||
}
|
||||
|
||||
define <WIDTH x float> @__min_varying_float(<WIDTH x float>,
|
||||
<WIDTH x float>) nounwind readnone alwaysinline {
|
||||
binary4to8(r, float, @llvm.arm.neon.vmins.v4f32, %0, %1)
|
||||
ret <WIDTH x float> %r
|
||||
}
|
||||
|
||||
declare <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
|
||||
define <WIDTH x i32> @__min_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
binary4to8(r, i32, @llvm.arm.neon.vmins.v4i32, %0, %1)
|
||||
ret <WIDTH x i32> %r
|
||||
}
|
||||
|
||||
define <WIDTH x i32> @__max_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
binary4to8(r, i32, @llvm.arm.neon.vmaxs.v4i32, %0, %1)
|
||||
ret <WIDTH x i32> %r
|
||||
}
|
||||
|
||||
define <WIDTH x i32> @__min_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
binary4to8(r, i32, @llvm.arm.neon.vminu.v4i32, %0, %1)
|
||||
ret <WIDTH x i32> %r
|
||||
}
|
||||
|
||||
define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
binary4to8(r, i32, @llvm.arm.neon.vmaxu.v4i32, %0, %1)
|
||||
ret <WIDTH x i32> %r
|
||||
}
|
||||
|
||||
;; sqrt/rsqrt/rcp
|
||||
|
||||
declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
|
||||
unary4to8(x0, float, @llvm.arm.neon.vrecpe.v4f32, %d)
|
||||
binary4to8(x0_nr, float, @llvm.arm.neon.vrecps.v4f32, %d, %x0)
|
||||
%x1 = fmul <WIDTH x float> %x0, %x0_nr
|
||||
binary4to8(x1_nr, float, @llvm.arm.neon.vrecps.v4f32, %d, %x1)
|
||||
%x2 = fmul <WIDTH x float> %x1, %x1_nr
|
||||
ret <WIDTH x float> %x2
|
||||
}
|
||||
|
||||
declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
|
||||
unary4to8(x0, float, @llvm.arm.neon.vrsqrte.v4f32, %d)
|
||||
%x0_2 = fmul <WIDTH x float> %x0, %x0
|
||||
binary4to8(x0_nr, float, @llvm.arm.neon.vrsqrts.v4f32, %d, %x0_2)
|
||||
%x1 = fmul <WIDTH x float> %x0, %x0_nr
|
||||
%x1_2 = fmul <WIDTH x float> %x1, %x1
|
||||
binary4to8(x1_nr, float, @llvm.arm.neon.vrsqrts.v4f32, %d, %x1_2)
|
||||
%x2 = fmul <WIDTH x float> %x1, %x1_nr
|
||||
ret <WIDTH x float> %x2
|
||||
}
|
||||
|
||||
define float @__rsqrt_uniform_float(float) nounwind readnone alwaysinline {
|
||||
%v1 = bitcast float %0 to <1 x float>
|
||||
%vs = shufflevector <1 x float> %v1, <1 x float> undef,
|
||||
<8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
%vr = call <8 x float> @__rsqrt_varying_float(<8 x float> %vs)
|
||||
%r = extractelement <8 x float> %vr, i32 0
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define float @__rcp_uniform_float(float) nounwind readnone alwaysinline {
|
||||
%v1 = bitcast float %0 to <1 x float>
|
||||
%vs = shufflevector <1 x float> %v1, <1 x float> undef,
|
||||
<8 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
%vr = call <8 x float> @__rcp_varying_float(<8 x float> %vs)
|
||||
%r = extractelement <8 x float> %vr, i32 0
|
||||
ret float %r
|
||||
}
|
||||
|
||||
declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
|
||||
|
||||
define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(result, float, @llvm.sqrt.v4f32, %0)
|
||||
;; this returns nan for v=0, which is undesirable..
|
||||
;; %rsqrt = call <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %0)
|
||||
;; %result = fmul <4 x float> %rsqrt, %0
|
||||
ret <8 x float> %result
|
||||
}
|
||||
|
||||
declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
|
||||
|
||||
define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readnone alwaysinline {
|
||||
unary4to8(r, double, @llvm.sqrt.v4f64, %0)
|
||||
ret <WIDTH x double> %r
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reductions
|
||||
|
||||
define i64 @__movmsk(<WIDTH x MASK>) nounwind readnone alwaysinline {
|
||||
%and_mask = and <WIDTH x i16> %0,
|
||||
<i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128>
|
||||
%v4 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %and_mask)
|
||||
%v2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %v4)
|
||||
%va = extractelement <2 x i64> %v2, i32 0
|
||||
%vb = extractelement <2 x i64> %v2, i32 1
|
||||
%v = or i64 %va, %vb
|
||||
ret i64 %v
|
||||
}
|
||||
|
||||
define i1 @__any(<WIDTH x MASK>) nounwind readnone alwaysinline {
|
||||
v8tov4(MASK, %0, %v0123, %v4567)
|
||||
%vor = or <4 x MASK> %v0123, %v4567
|
||||
%v0 = extractelement <4 x MASK> %vor, i32 0
|
||||
%v1 = extractelement <4 x MASK> %vor, i32 1
|
||||
%v2 = extractelement <4 x MASK> %vor, i32 2
|
||||
%v3 = extractelement <4 x MASK> %vor, i32 3
|
||||
%v01 = or MASK %v0, %v1
|
||||
%v23 = or MASK %v2, %v3
|
||||
%v = or MASK %v01, %v23
|
||||
%cmp = icmp ne MASK %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__all(<WIDTH x MASK>) nounwind readnone alwaysinline {
|
||||
v8tov4(MASK, %0, %v0123, %v4567)
|
||||
%vand = and <4 x MASK> %v0123, %v4567
|
||||
%v0 = extractelement <4 x MASK> %vand, i32 0
|
||||
%v1 = extractelement <4 x MASK> %vand, i32 1
|
||||
%v2 = extractelement <4 x MASK> %vand, i32 2
|
||||
%v3 = extractelement <4 x MASK> %vand, i32 3
|
||||
%v01 = and MASK %v0, %v1
|
||||
%v23 = and MASK %v2, %v3
|
||||
%v = and MASK %v01, %v23
|
||||
%cmp = icmp ne MASK %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
;; Returns true iff no lane of the mask is on: the complement of @__any.
define i1 @__none(<WIDTH x MASK>) nounwind readnone alwaysinline {
  %any_on = call i1 @__any(<WIDTH x MASK> %0)
  %no_lanes_on = xor i1 %any_on, true
  ret i1 %no_lanes_on
}
|
||||
|
||||
;; Helper macro for full horizontal reductions of an 8-wide vector.
;; $1: scalar type
;; $2: vector/vector reduce function (2 x <WIDTH x vec> -> <WIDTH x vec>)
;; $3: pairwise vector reduce function (2 x <2 x vec> -> <2 x vec>)
;; $4: scalar reduce function

define(`neon_reduce', `
  v8tov4($1, %0, %v0123, %v4567)
  ;; Widen each 4-wide half back out to 8 lanes (upper lanes undef) so the
  ;; 8-wide $2 function can combine them; only the low 4 lanes of the
  ;; result are meaningful.
  %v0123_8 = shufflevector <4 x $1> %v0123, <4 x $1> undef,
      <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
  %v4567_8 = shufflevector <4 x $1> %v4567, <4 x $1> undef,
      <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
  %vfirst = call <8 x $1> $2(<8 x $1> %v0123_8, <8 x $1> %v4567_8)
  %vfirst_4 = shufflevector <8 x $1> %vfirst, <8 x $1> undef,
      <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ;; Split the four partial results into two pairs, pairwise-reduce ($3),
  ;; and finish with the scalar op ($4) on the final two values.
  v4tov2($1, %vfirst_4, %v0, %v1)
  %vh = call <2 x $1> $3(<2 x $1> %v0, <2 x $1> %v1)
  %vh0 = extractelement <2 x $1> %vh, i32 0
  %vh1 = extractelement <2 x $1> %vh, i32 1
  %r = call $1 $4($1 %vh0, $1 %vh1)
  ret $1 %r
')
|
||||
|
||||
declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
|
||||
|
||||
define internal float @add_f32(float, float) nounwind readnone alwaysinline {
|
||||
%r = fadd float %0, %1
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define internal <WIDTH x float> @__add_varying_float(<WIDTH x float>, <WIDTH x float>) nounwind readnone alwaysinline {
|
||||
%r = fadd <WIDTH x float> %0, %1
|
||||
ret <WIDTH x float> %r
|
||||
}
|
||||
|
||||
define float @__reduce_add_float(<WIDTH x float>) nounwind readnone alwaysinline {
|
||||
neon_reduce(float, @__add_varying_float, @llvm.arm.neon.vpadd.v2f32, @add_f32)
|
||||
}
|
||||
|
||||
declare <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
|
||||
|
||||
define internal float @min_f32(float, float) nounwind readnone alwaysinline {
|
||||
%cmp = fcmp olt float %0, %1
|
||||
%r = select i1 %cmp, float %0, float %1
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define float @__reduce_min_float(<WIDTH x float>) nounwind readnone alwaysinline {
|
||||
neon_reduce(float, @__min_varying_float, @llvm.arm.neon.vpmins.v2f32, @min_f32)
|
||||
}
|
||||
|
||||
declare <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone
|
||||
|
||||
define internal float @max_f32(float, float) nounwind readnone alwaysinline {
|
||||
%cmp = fcmp ugt float %0, %1
|
||||
%r = select i1 %cmp, float %0, float %1
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define float @__reduce_max_float(<WIDTH x float>) nounwind readnone alwaysinline {
|
||||
neon_reduce(float, @__max_varying_float, @llvm.arm.neon.vpmaxs.v2f32, @max_f32)
|
||||
}
|
||||
|
||||
declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) nounwind readnone
|
||||
declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone alwaysinline {
|
||||
%a16 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %0)
|
||||
%a32 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %a16)
|
||||
%a0 = extractelement <2 x i32> %a32, i32 0
|
||||
%a1 = extractelement <2 x i32> %a32, i32 1
|
||||
%r = add i32 %a0, %a1
|
||||
%r16 = trunc i32 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
;; Pairwise long (widening) add: 8 x i16 -> 4 x i32.
;; Consistency fix: every other intrinsic declare in this file carries
;; `nounwind readnone`; this one was missing them.
declare <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<WIDTH x i16>) nounwind readnone

;; Horizontal add of the eight i16 lanes, widened out to i64 via two
;; levels of widening pairwise adds plus one scalar add.
;; NOTE(review): vpaddlu zero-extends each i16 lane, so negative int16
;; values contribute their unsigned (mod 2^16) encoding to the i64 result;
;; confirm callers only rely on the low 16 bits or on unsigned inputs.
define i64 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone alwaysinline {
  %a1 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<WIDTH x i16> %0)
  %a2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %a1)
  %aa = extractelement <2 x i64> %a2, i32 0
  %ab = extractelement <2 x i64> %a2, i32 1
  %r = add i64 %aa, %ab
  ret i64 %r
}
|
||||
|
||||
declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone
|
||||
|
||||
define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
v8tov4(i32, %0, %va, %vb)
|
||||
%pa = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %va)
|
||||
%pb = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %vb)
|
||||
%psum = add <2 x i64> %pa, %pb
|
||||
%a0 = extractelement <2 x i64> %psum, i32 0
|
||||
%a1 = extractelement <2 x i64> %psum, i32 1
|
||||
%r = add i64 %a0, %a1
|
||||
ret i64 %r
|
||||
}
|
||||
|
||||
declare <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
|
||||
|
||||
define internal i32 @min_si32(i32, i32) nounwind readnone alwaysinline {
|
||||
%cmp = icmp slt i32 %0, %1
|
||||
%r = select i1 %cmp, i32 %0, i32 %1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
neon_reduce(i32, @__min_varying_int32, @llvm.arm.neon.vpmins.v2i32, @min_si32)
|
||||
}
|
||||
|
||||
declare <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
|
||||
|
||||
define internal i32 @max_si32(i32, i32) nounwind readnone alwaysinline {
|
||||
%cmp = icmp sgt i32 %0, %1
|
||||
%r = select i1 %cmp, i32 %0, i32 %1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
neon_reduce(i32, @__max_varying_int32, @llvm.arm.neon.vpmaxs.v2i32, @max_si32)
|
||||
}
|
||||
|
||||
;; Pairwise *unsigned* minimum of two <2 x i32> vectors.
declare <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

;; Scalar unsigned-min helper used for the final reduction step.
define internal i32 @min_ui32(i32, i32) nounwind readnone alwaysinline {
  %cmp = icmp ult i32 %0, %1
  %r = select i1 %cmp, i32 %0, i32 %1
  ret i32 %r
}

;; Horizontal unsigned-min reduction across the WIDTH-wide vector.
;; BUGFIX: the pairwise step previously used @llvm.arm.neon.vpmins.v2i32
;; (signed pairwise min), which gives wrong results for values with the
;; top bit set.  The unsigned variant declared above (and until now
;; unused) matches the `icmp ult` in @min_ui32 and the semantics of
;; @__min_varying_uint32.
define i32 @__reduce_min_uint32(<WIDTH x i32>) nounwind readnone alwaysinline {
  neon_reduce(i32, @__min_varying_uint32, @llvm.arm.neon.vpminu.v2i32, @min_ui32)
}
|
||||
|
||||
;; Pairwise *unsigned* maximum of two <2 x i32> vectors.
declare <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

;; Scalar unsigned-max helper used for the final reduction step.
define internal i32 @max_ui32(i32, i32) nounwind readnone alwaysinline {
  %cmp = icmp ugt i32 %0, %1
  %r = select i1 %cmp, i32 %0, i32 %1
  ret i32 %r
}

;; Horizontal unsigned-max reduction across the WIDTH-wide vector.
;; BUGFIX: the pairwise step previously used @llvm.arm.neon.vpmaxs.v2i32
;; (signed pairwise max), which gives wrong results for values with the
;; top bit set.  The unsigned variant declared above (and until now
;; unused) matches the `icmp ugt` in @max_ui32 and the semantics of
;; @__max_varying_uint32.
define i32 @__reduce_max_uint32(<WIDTH x i32>) nounwind readnone alwaysinline {
  neon_reduce(i32, @__max_varying_uint32, @llvm.arm.neon.vpmaxu.v2i32, @max_ui32)
}
|
||||
|
||||
define double @__reduce_add_double(<WIDTH x double>) nounwind readnone alwaysinline {
|
||||
v8tov2(double, %0, %v0, %v1, %v2, %v3)
|
||||
%v01 = fadd <2 x double> %v0, %v1
|
||||
%v23 = fadd <2 x double> %v2, %v3
|
||||
%sum = fadd <2 x double> %v01, %v23
|
||||
%e0 = extractelement <2 x double> %sum, i32 0
|
||||
%e1 = extractelement <2 x double> %sum, i32 1
|
||||
%m = fadd double %e0, %e1
|
||||
ret double %m
|
||||
}
|
||||
|
||||
define double @__reduce_min_double(<WIDTH x double>) nounwind readnone alwaysinline {
|
||||
reduce8(double, @__min_varying_double, @__min_uniform_double)
|
||||
}
|
||||
|
||||
define double @__reduce_max_double(<WIDTH x double>) nounwind readnone alwaysinline {
|
||||
reduce8(double, @__max_varying_double, @__max_uniform_double)
|
||||
}
|
||||
|
||||
define i64 @__reduce_add_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
|
||||
v8tov2(i64, %0, %v0, %v1, %v2, %v3)
|
||||
%v01 = add <2 x i64> %v0, %v1
|
||||
%v23 = add <2 x i64> %v2, %v3
|
||||
%sum = add <2 x i64> %v01, %v23
|
||||
%e0 = extractelement <2 x i64> %sum, i32 0
|
||||
%e1 = extractelement <2 x i64> %sum, i32 1
|
||||
%m = add i64 %e0, %e1
|
||||
ret i64 %m
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
|
||||
reduce8(i64, @__min_varying_int64, @__min_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_max_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
|
||||
reduce8(i64, @__max_varying_int64, @__max_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_uint64(<WIDTH x i64>) nounwind readnone alwaysinline {
|
||||
reduce8(i64, @__min_varying_uint64, @__min_uniform_uint64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_max_uint64(<WIDTH x i64>) nounwind readnone alwaysinline {
|
||||
reduce8(i64, @__max_varying_uint64, @__max_uniform_uint64)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int8/int16
|
||||
|
||||
declare <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
|
||||
|
||||
define <8 x i8> @__avg_up_uint8(<8 x i8>, <8 x i8>) nounwind readnone alwaysinline {
|
||||
%r = call <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8> %0, <8 x i8> %1)
|
||||
ret <8 x i8> %r
|
||||
}
|
||||
|
||||
declare <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
|
||||
|
||||
define <8 x i8> @__avg_up_int8(<8 x i8>, <8 x i8>) nounwind readnone alwaysinline {
|
||||
%r = call <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8> %0, <8 x i8> %1)
|
||||
ret <8 x i8> %r
|
||||
}
|
||||
|
||||
declare <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
|
||||
|
||||
define <8 x i8> @__avg_down_uint8(<8 x i8>, <8 x i8>) nounwind readnone alwaysinline {
|
||||
%r = call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %0, <8 x i8> %1)
|
||||
ret <8 x i8> %r
|
||||
}
|
||||
|
||||
declare <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
|
||||
|
||||
define <8 x i8> @__avg_down_int8(<8 x i8>, <8 x i8>) nounwind readnone alwaysinline {
|
||||
%r = call <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8> %0, <8 x i8> %1)
|
||||
ret <8 x i8> %r
|
||||
}
|
||||
|
||||
declare <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
|
||||
|
||||
define <8 x i16> @__avg_up_uint16(<8 x i16>, <8 x i16>) nounwind readnone alwaysinline {
|
||||
%r = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %0, <8 x i16> %1)
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
declare <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
|
||||
|
||||
define <8 x i16> @__avg_up_int16(<8 x i16>, <8 x i16>) nounwind readnone alwaysinline {
|
||||
%r = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %0, <8 x i16> %1)
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
declare <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
|
||||
|
||||
define <8 x i16> @__avg_down_uint16(<8 x i16>, <8 x i16>) nounwind readnone alwaysinline {
|
||||
%r = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %0, <8 x i16> %1)
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
declare <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
|
||||
|
||||
define <8 x i16> @__avg_down_int16(<8 x i16>, <8 x i16>) nounwind readnone alwaysinline {
|
||||
%r = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %0, <8 x i16> %1)
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
saturation_arithmetic()
|
||||
497
builtins/target-neon-32.ll
Normal file
497
builtins/target-neon-32.ll
Normal file
@@ -0,0 +1,497 @@
|
||||
;;
|
||||
;; target-neon-32.ll
|
||||
;;
|
||||
;; Copyright(c) 2012-2013 Matt Pharr
|
||||
;; Copyright(c) 2013, 2015 Google, Inc.
|
||||
;;
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Matt Pharr nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
define(`WIDTH',`4')
|
||||
define(`MASK',`i32')
|
||||
|
||||
include(`util.m4')
|
||||
include(`target-neon-common.ll')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
define <4 x float> @__half_to_float_varying(<4 x i16> %v) nounwind readnone alwaysinline {
|
||||
%r = call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %v)
|
||||
ret <4 x float> %r
|
||||
}
|
||||
|
||||
define <4 x i16> @__float_to_half_varying(<4 x float> %v) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %v)
|
||||
ret <4 x i16> %r
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; math
|
||||
|
||||
;; round/floor/ceil
|
||||
|
||||
;; FIXME: grabbed these from the sse2 target, which does not have native
|
||||
;; instructions for these. Is there a better approach for NEON?
|
||||
|
||||
;; Round to nearest integer using the classic "add then subtract 2^23"
;; trick, applied to |x| with the original sign re-applied at the end.
;; Tie behavior follows the FPU's current rounding mode (normally
;; round-to-nearest-even).
define <4 x float> @__round_varying_float(<4 x float>) nounwind readonly alwaysinline {
  %float_to_int_bitcast.i.i.i.i = bitcast <4 x float> %0 to <4 x i32>
  ;; isolate the sign bit of each lane (-2147483648 == 0x80000000)
  %bitop.i.i = and <4 x i32> %float_to_int_bitcast.i.i.i.i, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  ;; xor-ing the sign bits away yields |x|
  %bitop.i = xor <4 x i32> %float_to_int_bitcast.i.i.i.i, %bitop.i.i
  %int_to_float_bitcast.i.i40.i = bitcast <4 x i32> %bitop.i to <4 x float>
  ;; 8.388608e+06 == 2^23: adding it pushes the fractional bits out of the
  ;; 23-bit mantissa (rounding them); subtracting recovers the rounded value
  %binop.i = fadd <4 x float> %int_to_float_bitcast.i.i40.i, <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>
  %binop21.i = fadd <4 x float> %binop.i, <float -8.388608e+06, float -8.388608e+06, float -8.388608e+06, float -8.388608e+06>
  %float_to_int_bitcast.i.i.i = bitcast <4 x float> %binop21.i to <4 x i32>
  ;; re-apply the original sign bit
  %bitop31.i = xor <4 x i32> %float_to_int_bitcast.i.i.i, %bitop.i.i
  %int_to_float_bitcast.i.i.i = bitcast <4 x i32> %bitop31.i to <4 x float>
  ret <4 x float> %int_to_float_bitcast.i.i.i
}
|
||||
|
||||
define <4 x float> @__floor_varying_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
%calltmp.i = tail call <4 x float> @__round_varying_float(<4 x float> %0) nounwind
|
||||
%bincmp.i = fcmp ogt <4 x float> %calltmp.i, %0
|
||||
%val_to_boolvec32.i = sext <4 x i1> %bincmp.i to <4 x i32>
|
||||
%bitop.i = and <4 x i32> %val_to_boolvec32.i, <i32 -1082130432, i32 -1082130432, i32 -1082130432, i32 -1082130432>
|
||||
%int_to_float_bitcast.i.i.i = bitcast <4 x i32> %bitop.i to <4 x float>
|
||||
%binop.i = fadd <4 x float> %calltmp.i, %int_to_float_bitcast.i.i.i
|
||||
ret <4 x float> %binop.i
|
||||
}
|
||||
|
||||
define <4 x float> @__ceil_varying_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
%calltmp.i = tail call <4 x float> @__round_varying_float(<4 x float> %0) nounwind
|
||||
%bincmp.i = fcmp olt <4 x float> %calltmp.i, %0
|
||||
%val_to_boolvec32.i = sext <4 x i1> %bincmp.i to <4 x i32>
|
||||
%bitop.i = and <4 x i32> %val_to_boolvec32.i, <i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216>
|
||||
%int_to_float_bitcast.i.i.i = bitcast <4 x i32> %bitop.i to <4 x float>
|
||||
%binop.i = fadd <4 x float> %calltmp.i, %int_to_float_bitcast.i.i.i
|
||||
ret <4 x float> %binop.i
|
||||
}
|
||||
|
||||
;; FIXME: rounding doubles and double vectors needs to be implemented
|
||||
declare <WIDTH x double> @__round_varying_double(<WIDTH x double>) nounwind readnone
|
||||
declare <WIDTH x double> @__floor_varying_double(<WIDTH x double>) nounwind readnone
|
||||
declare <WIDTH x double> @__ceil_varying_double(<WIDTH x double>) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; min/max
|
||||
|
||||
declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define <WIDTH x float> @__max_varying_float(<WIDTH x float>,
|
||||
<WIDTH x float>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %0, <4 x float> %1)
|
||||
ret <WIDTH x float> %r
|
||||
}
|
||||
|
||||
define <WIDTH x float> @__min_varying_float(<WIDTH x float>,
|
||||
<WIDTH x float>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %0, <4 x float> %1)
|
||||
ret <WIDTH x float> %r
|
||||
}
|
||||
|
||||
declare <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
|
||||
define <WIDTH x i32> @__min_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %r
|
||||
}
|
||||
|
||||
define <WIDTH x i32> @__max_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %r
|
||||
}
|
||||
|
||||
define <WIDTH x i32> @__min_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %r
|
||||
}
|
||||
|
||||
define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %0, <4 x i32> %1)
|
||||
ret <4 x i32> %r
|
||||
}
|
||||
|
||||
;; sqrt/rsqrt/rcp
|
||||
|
||||
;; NEON reciprocal-estimate and Newton-Raphson-step intrinsics.
declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone

;; Approximate 1/d: hardware estimate (vrecpe) refined by two
;; Newton-Raphson iterations.  vrecps(d, x) computes (2 - d*x), so
;; x * vrecps(d, x) is one refinement step.
define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
  %x0 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %d)
  %x0_nr = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %d, <4 x float> %x0)
  %x1 = fmul <4 x float> %x0, %x0_nr
  %x1_nr = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %d, <4 x float> %x1)
  %x2 = fmul <4 x float> %x1, %x1_nr
  ret <4 x float> %x2
}
|
||||
|
||||
;; NEON reciprocal-square-root-estimate and Newton-Raphson-step intrinsics.
declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone

;; Approximate 1/sqrt(d): hardware estimate (vrsqrte) refined by two
;; Newton-Raphson iterations.  vrsqrts(d, x*x) computes (3 - d*x*x)/2, so
;; x * vrsqrts(d, x*x) is one refinement step.
define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
  %x0 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %d)
  %x0_2 = fmul <4 x float> %x0, %x0
  %x0_nr = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %d, <4 x float> %x0_2)
  %x1 = fmul <4 x float> %x0, %x0_nr
  %x1_2 = fmul <4 x float> %x1, %x1
  %x1_nr = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %d, <4 x float> %x1_2)
  %x2 = fmul <4 x float> %x1, %x1_nr
  ret <4 x float> %x2
}
|
||||
|
||||
;; Scalar 1/sqrt approximation: broadcast the value into lane 0 of a
;; 4-wide vector (other lanes undef), run the varying version, and pull
;; lane 0 back out.
define float @__rsqrt_uniform_float(float) nounwind readnone alwaysinline {
  %vec = insertelement <4 x float> undef, float %0, i32 0
  %rsqrt_vec = call <4 x float> @__rsqrt_varying_float(<4 x float> %vec)
  %scalar = extractelement <4 x float> %rsqrt_vec, i32 0
  ret float %scalar
}
|
||||
|
||||
;; Scalar reciprocal approximation: place the value in lane 0 of a
;; 4-wide vector (other lanes undef), run the varying version, and pull
;; lane 0 back out.
define float @__rcp_uniform_float(float) nounwind readnone alwaysinline {
  %vec = insertelement <4 x float> undef, float %0, i32 0
  %rcp_vec = call <4 x float> @__rcp_varying_float(<4 x float> %vec)
  %scalar = extractelement <4 x float> %rcp_vec, i32 0
  ret float %scalar
}
|
||||
|
||||
declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
|
||||
|
||||
define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone alwaysinline {
|
||||
%result = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
|
||||
;; this returns nan for v=0, which is undesirable..
|
||||
;; %rsqrt = call <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %0)
|
||||
;; %result = fmul <4 x float> %rsqrt, %0
|
||||
ret <4 x float> %result
|
||||
}
|
||||
|
||||
declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
|
||||
|
||||
define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %0)
|
||||
ret <4 x double> %r
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reductions
|
||||
|
||||
;; Packs the 4-lane mask into a 4-bit lane bitmap returned as i64.
;; Active lanes are all-ones, so AND-ing with <1,2,4,8> leaves one
;; distinct bit per active lane; OR-ing the lanes together merges the
;; disjoint bits into the final bitmap.
define i64 @__movmsk(<4 x MASK>) nounwind readnone alwaysinline {
  %and_mask = and <4 x MASK> %0, <MASK 1, MASK 2, MASK 4, MASK 8>
  %v01 = shufflevector <4 x i32> %and_mask, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
  %v23 = shufflevector <4 x i32> %and_mask, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
  %vor = or <2 x i32> %v01, %v23
  %v0 = extractelement <2 x i32> %vor, i32 0
  %v1 = extractelement <2 x i32> %vor, i32 1
  %v = or i32 %v0, %v1
  %mask64 = zext i32 %v to i64
  ret i64 %mask64
}
|
||||
|
||||
define i1 @__any(<4 x i32>) nounwind readnone alwaysinline {
|
||||
%v01 = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
|
||||
%v23 = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
|
||||
%vor = or <2 x i32> %v01, %v23
|
||||
%v0 = extractelement <2 x i32> %vor, i32 0
|
||||
%v1 = extractelement <2 x i32> %vor, i32 1
|
||||
%v = or i32 %v0, %v1
|
||||
%cmp = icmp ne i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__all(<4 x i32>) nounwind readnone alwaysinline {
|
||||
%v01 = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
|
||||
%v23 = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
|
||||
%vor = and <2 x i32> %v01, %v23
|
||||
%v0 = extractelement <2 x i32> %vor, i32 0
|
||||
%v1 = extractelement <2 x i32> %vor, i32 1
|
||||
%v = and i32 %v0, %v1
|
||||
%cmp = icmp ne i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
;; Returns true iff no lane of the mask is on: the complement of @__any.
define i1 @__none(<4 x i32>) nounwind readnone alwaysinline {
  %any_on = call i1 @__any(<4 x i32> %0)
  %no_lanes_on = xor i1 %any_on, true
  ret i1 %no_lanes_on
}
|
||||
|
||||
;; Helper macro for full horizontal reductions of a 4-wide vector.
;; $1: scalar type
;; $2: pairwise vector reduce function (2 x <2 x vec> -> <2 x vec>)
;; $3: scalar reduce function

define(`neon_reduce', `
  ;; split the vector into its low and high halves
  %v0 = shufflevector <4 x $1> %0, <4 x $1> undef, <2 x i32> <i32 0, i32 1>
  %v1 = shufflevector <4 x $1> %0, <4 x $1> undef, <2 x i32> <i32 2, i32 3>
  ;; pairwise-reduce the halves down to two partial results
  %vh = call <2 x $1> $2(<2 x $1> %v0, <2 x $1> %v1)
  %vh0 = extractelement <2 x $1> %vh, i32 0
  %vh1 = extractelement <2 x $1> %vh, i32 1
  ;; finish with the scalar reduce op.  (Spacing normalized from the
  ;; previous `call $1$3 (...)` to match the 8-wide macro; the m4
  ;; expansion is parsed identically by LLVM either way.)
  %r = call $1 $3($1 %vh0, $1 %vh1)
  ret $1 %r
')
|
||||
|
||||
declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
|
||||
|
||||
define internal float @add_f32(float, float) nounwind readnone alwaysinline {
|
||||
%r = fadd float %0, %1
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define float @__reduce_add_float(<4 x float>) nounwind readnone alwaysinline {
|
||||
neon_reduce(float, @llvm.arm.neon.vpadd.v2f32, @add_f32)
|
||||
}
|
||||
|
||||
declare <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
|
||||
|
||||
define internal float @min_f32(float, float) nounwind readnone alwaysinline {
|
||||
%cmp = fcmp olt float %0, %1
|
||||
%r = select i1 %cmp, float %0, float %1
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define float @__reduce_min_float(<4 x float>) nounwind readnone alwaysinline {
|
||||
neon_reduce(float, @llvm.arm.neon.vpmins.v2f32, @min_f32)
|
||||
}
|
||||
|
||||
declare <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone
|
||||
|
||||
define internal float @max_f32(float, float) nounwind readnone alwaysinline {
|
||||
%cmp = fcmp ugt float %0, %1
|
||||
%r = select i1 %cmp, float %0, float %1
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define float @__reduce_max_float(<4 x float>) nounwind readnone alwaysinline {
|
||||
neon_reduce(float, @llvm.arm.neon.vpmaxs.v2f32, @max_f32)
|
||||
}
|
||||
|
||||
declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone alwaysinline {
|
||||
%v8 = shufflevector <4 x i8> %0, <4 x i8> zeroinitializer,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
|
||||
%a16 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %v8)
|
||||
%a32 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %a16)
|
||||
%a0 = extractelement <2 x i32> %a32, i32 0
|
||||
%a1 = extractelement <2 x i32> %a32, i32 1
|
||||
%r = add i32 %a0, %a1
|
||||
%r16 = trunc i32 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) nounwind readnone
|
||||
|
||||
define i32 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone alwaysinline {
|
||||
%a32 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %0)
|
||||
%a0 = extractelement <2 x i32> %a32, i32 0
|
||||
%a1 = extractelement <2 x i32> %a32, i32 1
|
||||
%r = add i32 %a0, %a1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone
|
||||
|
||||
define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
|
||||
%a64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %0)
|
||||
%a0 = extractelement <2 x i64> %a64, i32 0
|
||||
%a1 = extractelement <2 x i64> %a64, i32 1
|
||||
%r = add i64 %a0, %a1
|
||||
ret i64 %r
|
||||
}
|
||||
|
||||
declare <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
|
||||
|
||||
define internal i32 @min_si32(i32, i32) nounwind readnone alwaysinline {
|
||||
%cmp = icmp slt i32 %0, %1
|
||||
%r = select i1 %cmp, i32 %0, i32 %1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_int32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
neon_reduce(i32, @llvm.arm.neon.vpmins.v2i32, @min_si32)
|
||||
}
|
||||
|
||||
declare <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
|
||||
|
||||
define internal i32 @max_si32(i32, i32) nounwind readnone alwaysinline {
|
||||
%cmp = icmp sgt i32 %0, %1
|
||||
%r = select i1 %cmp, i32 %0, i32 %1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_int32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
neon_reduce(i32, @llvm.arm.neon.vpmaxs.v2i32, @max_si32)
|
||||
}
|
||||
|
||||
;; Pairwise *unsigned* minimum of two <2 x i32> vectors.
declare <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

;; Scalar unsigned-min helper used for the final reduction step.
define internal i32 @min_ui32(i32, i32) nounwind readnone alwaysinline {
  %cmp = icmp ult i32 %0, %1
  %r = select i1 %cmp, i32 %0, i32 %1
  ret i32 %r
}

;; Horizontal unsigned-min reduction across the 4-wide vector.
;; BUGFIX: the pairwise step previously used @llvm.arm.neon.vpmins.v2i32
;; (signed pairwise min), which gives wrong results for values with the
;; top bit set.  The unsigned variant declared above (and until now
;; unused) matches the `icmp ult` in @min_ui32.
define i32 @__reduce_min_uint32(<4 x i32>) nounwind readnone alwaysinline {
  neon_reduce(i32, @llvm.arm.neon.vpminu.v2i32, @min_ui32)
}
|
||||
|
||||
;; Pairwise *unsigned* maximum of two <2 x i32> vectors.
declare <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

;; Scalar unsigned-max helper used for the final reduction step.
define internal i32 @max_ui32(i32, i32) nounwind readnone alwaysinline {
  %cmp = icmp ugt i32 %0, %1
  %r = select i1 %cmp, i32 %0, i32 %1
  ret i32 %r
}

;; Horizontal unsigned-max reduction across the 4-wide vector.
;; BUGFIX: the pairwise step previously used @llvm.arm.neon.vpmaxs.v2i32
;; (signed pairwise max), which gives wrong results for values with the
;; top bit set.  The unsigned variant declared above (and until now
;; unused) matches the `icmp ugt` in @max_ui32.
define i32 @__reduce_max_uint32(<4 x i32>) nounwind readnone alwaysinline {
  neon_reduce(i32, @llvm.arm.neon.vpmaxu.v2i32, @max_ui32)
}
|
||||
|
||||
define double @__reduce_add_double(<4 x double>) nounwind readnone alwaysinline {
|
||||
%v0 = shufflevector <4 x double> %0, <4 x double> undef,
|
||||
<2 x i32> <i32 0, i32 1>
|
||||
%v1 = shufflevector <4 x double> %0, <4 x double> undef,
|
||||
<2 x i32> <i32 2, i32 3>
|
||||
%sum = fadd <2 x double> %v0, %v1
|
||||
%e0 = extractelement <2 x double> %sum, i32 0
|
||||
%e1 = extractelement <2 x double> %sum, i32 1
|
||||
%m = fadd double %e0, %e1
|
||||
ret double %m
|
||||
}
|
||||
|
||||
define double @__reduce_min_double(<4 x double>) nounwind readnone alwaysinline {
|
||||
reduce4(double, @__min_varying_double, @__min_uniform_double)
|
||||
}
|
||||
|
||||
define double @__reduce_max_double(<4 x double>) nounwind readnone alwaysinline {
|
||||
reduce4(double, @__max_varying_double, @__max_uniform_double)
|
||||
}
|
||||
|
||||
define i64 @__reduce_add_int64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
%v0 = shufflevector <4 x i64> %0, <4 x i64> undef,
|
||||
<2 x i32> <i32 0, i32 1>
|
||||
%v1 = shufflevector <4 x i64> %0, <4 x i64> undef,
|
||||
<2 x i32> <i32 2, i32 3>
|
||||
%sum = add <2 x i64> %v0, %v1
|
||||
%e0 = extractelement <2 x i64> %sum, i32 0
|
||||
%e1 = extractelement <2 x i64> %sum, i32 1
|
||||
%m = add i64 %e0, %e1
|
||||
ret i64 %m
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_int64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__min_varying_int64, @__min_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_max_int64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__max_varying_int64, @__max_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_uint64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__min_varying_uint64, @__min_uniform_uint64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_max_uint64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__max_varying_uint64, @__max_uniform_uint64)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int8/int16
|
||||
|
||||
declare <4 x i8> @llvm.arm.neon.vrhaddu.v4i8(<4 x i8>, <4 x i8>) nounwind readnone
|
||||
|
||||
define <4 x i8> @__avg_up_uint8(<4 x i8>, <4 x i8>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i8> @llvm.arm.neon.vrhaddu.v4i8(<4 x i8> %0, <4 x i8> %1)
|
||||
ret <4 x i8> %r
|
||||
}
|
||||
|
||||
declare <4 x i8> @llvm.arm.neon.vrhadds.v4i8(<4 x i8>, <4 x i8>) nounwind readnone
|
||||
|
||||
define <4 x i8> @__avg_up_int8(<4 x i8>, <4 x i8>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i8> @llvm.arm.neon.vrhadds.v4i8(<4 x i8> %0, <4 x i8> %1)
|
||||
ret <4 x i8> %r
|
||||
}
|
||||
|
||||
declare <4 x i8> @llvm.arm.neon.vhaddu.v4i8(<4 x i8>, <4 x i8>) nounwind readnone
|
||||
|
||||
define <4 x i8> @__avg_down_uint8(<4 x i8>, <4 x i8>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i8> @llvm.arm.neon.vhaddu.v4i8(<4 x i8> %0, <4 x i8> %1)
|
||||
ret <4 x i8> %r
|
||||
}
|
||||
|
||||
declare <4 x i8> @llvm.arm.neon.vhadds.v4i8(<4 x i8>, <4 x i8>) nounwind readnone
|
||||
|
||||
define <4 x i8> @__avg_down_int8(<4 x i8>, <4 x i8>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i8> @llvm.arm.neon.vhadds.v4i8(<4 x i8> %0, <4 x i8> %1)
|
||||
ret <4 x i8> %r
|
||||
}
|
||||
|
||||
declare <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
|
||||
|
||||
define <4 x i16> @__avg_up_uint16(<4 x i16>, <4 x i16>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16> %0, <4 x i16> %1)
|
||||
ret <4 x i16> %r
|
||||
}
|
||||
|
||||
declare <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
|
||||
|
||||
define <4 x i16> @__avg_up_int16(<4 x i16>, <4 x i16>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16> %0, <4 x i16> %1)
|
||||
ret <4 x i16> %r
|
||||
}
|
||||
|
||||
declare <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
|
||||
|
||||
define <4 x i16> @__avg_down_uint16(<4 x i16>, <4 x i16>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16> %0, <4 x i16> %1)
|
||||
ret <4 x i16> %r
|
||||
}
|
||||
|
||||
declare <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
|
||||
|
||||
define <4 x i16> @__avg_down_int16(<4 x i16>, <4 x i16>) nounwind readnone alwaysinline {
|
||||
%r = call <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16> %0, <4 x i16> %1)
|
||||
ret <4 x i16> %r
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
saturation_arithmetic()
|
||||
593
builtins/target-neon-8.ll
Normal file
593
builtins/target-neon-8.ll
Normal file
@@ -0,0 +1,593 @@
|
||||
;;
|
||||
;; target-neon-8.ll
|
||||
;;
|
||||
;; Copyright(c) 2013-2015 Google, Inc.
|
||||
;;
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Matt Pharr nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
;; This target is 16-wide with an i8 per-lane mask.
define(`WIDTH',`16')
define(`MASK',`i8')

include(`util.m4')
include(`target-neon-common.ll')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; half conversion routines

;; Widen 16 x f16 (stored as i16) to 16 x f32, four lanes at a time via
;; the NEON vcvt half->float intrinsic.
define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone alwaysinline {
  unary4to16conv(r, i16, float, @llvm.arm.neon.vcvthf2fp, %v)
  ret <16 x float> %r
}

;; Narrow 16 x f32 to 16 x f16 (as i16), four lanes at a time.
define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone alwaysinline {
  unary4to16conv(r, float, i16, @llvm.arm.neon.vcvtfp2hf, %v)
  ret <16 x i16> %r
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; math

;; round/floor/ceil

;; FIXME: grabbed these from the sse2 target, which does not have native
;; instructions for these. Is there a better approach for NEON?

;; Round to nearest integer using the classic magic-number trick:
;; strip the sign bit, add then subtract 2^23 (8.388608e+06) so the
;; fraction bits are rounded away, then restore the original sign.
define <16 x float> @__round_varying_float(<16 x float>) nounwind readonly alwaysinline {
  %float_to_int_bitcast.i.i.i.i = bitcast <16 x float> %0 to <16 x i32>
  ;; -2147483648 == 0x80000000: isolate the sign bits
  %bitop.i.i = and <16 x i32> %float_to_int_bitcast.i.i.i.i,
      <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648,
       i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648,
       i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648,
       i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  %bitop.i = xor <16 x i32> %float_to_int_bitcast.i.i.i.i, %bitop.i.i
  %int_to_float_bitcast.i.i40.i = bitcast <16 x i32> %bitop.i to <16 x float>
  %binop.i = fadd <16 x float> %int_to_float_bitcast.i.i40.i,
      <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06,
       float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06,
       float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06,
       float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>
  %binop21.i = fadd <16 x float> %binop.i,
      <float -8.388608e+06, float -8.388608e+06, float -8.388608e+06, float -8.388608e+06,
       float -8.388608e+06, float -8.388608e+06, float -8.388608e+06, float -8.388608e+06,
       float -8.388608e+06, float -8.388608e+06, float -8.388608e+06, float -8.388608e+06,
       float -8.388608e+06, float -8.388608e+06, float -8.388608e+06, float -8.388608e+06>
  %float_to_int_bitcast.i.i.i = bitcast <16 x float> %binop21.i to <16 x i32>
  ;; re-apply the saved sign bits
  %bitop31.i = xor <16 x i32> %float_to_int_bitcast.i.i.i, %bitop.i.i
  %int_to_float_bitcast.i.i.i = bitcast <16 x i32> %bitop31.i to <16 x float>
  ret <16 x float> %int_to_float_bitcast.i.i.i
}
|
||||
|
||||
;; floor(x) = round(x), minus 1.0 in the lanes where round() rounded up.
;; -1082130432 is the i32 bit pattern of -1.0f.
define <16 x float> @__floor_varying_float(<16 x float>) nounwind readonly alwaysinline {
  %calltmp.i = tail call <16 x float> @__round_varying_float(<16 x float> %0) nounwind
  %bincmp.i = fcmp ogt <16 x float> %calltmp.i, %0
  %val_to_boolvec32.i = sext <16 x i1> %bincmp.i to <16 x i32>
  %bitop.i = and <16 x i32> %val_to_boolvec32.i,
      <i32 -1082130432, i32 -1082130432, i32 -1082130432, i32 -1082130432,
       i32 -1082130432, i32 -1082130432, i32 -1082130432, i32 -1082130432,
       i32 -1082130432, i32 -1082130432, i32 -1082130432, i32 -1082130432,
       i32 -1082130432, i32 -1082130432, i32 -1082130432, i32 -1082130432>
  %int_to_float_bitcast.i.i.i = bitcast <16 x i32> %bitop.i to <16 x float>
  %binop.i = fadd <16 x float> %calltmp.i, %int_to_float_bitcast.i.i.i
  ret <16 x float> %binop.i
}

;; ceil(x) = round(x), plus 1.0 in the lanes where round() rounded down.
;; 1065353216 is the i32 bit pattern of 1.0f.
define <16 x float> @__ceil_varying_float(<16 x float>) nounwind readonly alwaysinline {
  %calltmp.i = tail call <16 x float> @__round_varying_float(<16 x float> %0) nounwind
  %bincmp.i = fcmp olt <16 x float> %calltmp.i, %0
  %val_to_boolvec32.i = sext <16 x i1> %bincmp.i to <16 x i32>
  %bitop.i = and <16 x i32> %val_to_boolvec32.i,
      <i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216,
       i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216,
       i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216,
       i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216>
  %int_to_float_bitcast.i.i.i = bitcast <16 x i32> %bitop.i to <16 x float>
  %binop.i = fadd <16 x float> %calltmp.i, %int_to_float_bitcast.i.i.i
  ret <16 x float> %binop.i
}
|
||||
|
||||
;; FIXME: rounding doubles and double vectors needs to be implemented
;; (declared here so the stdlib links; any call will fail at link time).
declare <WIDTH x double> @__round_varying_double(<WIDTH x double>) nounwind readnone
declare <WIDTH x double> @__floor_varying_double(<WIDTH x double>) nounwind readnone
declare <WIDTH x double> @__ceil_varying_double(<WIDTH x double>) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; min/max
;;
;; All of these run the 4-wide NEON vmin/vmax intrinsics over the 16-wide
;; vectors via the binary4to16() helper from util.m4.

declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone

define <WIDTH x float> @__max_varying_float(<WIDTH x float>,
                                            <WIDTH x float>) nounwind readnone alwaysinline {
  binary4to16(r, float, @llvm.arm.neon.vmaxs.v4f32, %0, %1)
  ret <WIDTH x float> %r
}

define <WIDTH x float> @__min_varying_float(<WIDTH x float>,
                                            <WIDTH x float>) nounwind readnone alwaysinline {
  binary4to16(r, float, @llvm.arm.neon.vmins.v4f32, %0, %1)
  ret <WIDTH x float> %r
}

declare <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone

define <WIDTH x i32> @__min_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
  binary4to16(r, i32, @llvm.arm.neon.vmins.v4i32, %0, %1)
  ret <WIDTH x i32> %r
}

define <WIDTH x i32> @__max_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
  binary4to16(r, i32, @llvm.arm.neon.vmaxs.v4i32, %0, %1)
  ret <WIDTH x i32> %r
}

define <WIDTH x i32> @__min_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
  binary4to16(r, i32, @llvm.arm.neon.vminu.v4i32, %0, %1)
  ret <WIDTH x i32> %r
}

define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
  binary4to16(r, i32, @llvm.arm.neon.vmaxu.v4i32, %0, %1)
  ret <WIDTH x i32> %r
}
|
||||
|
||||
;; sqrt/rsqrt/rcp

declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone

;; Approximate 1/d: hardware reciprocal estimate (vrecpe) refined with two
;; Newton-Raphson steps; vrecps produces the per-step correction factor.
define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
  unary4to16(x0, float, @llvm.arm.neon.vrecpe.v4f32, %d)
  binary4to16(x0_nr, float, @llvm.arm.neon.vrecps.v4f32, %d, %x0)
  %x1 = fmul <WIDTH x float> %x0, %x0_nr
  binary4to16(x1_nr, float, @llvm.arm.neon.vrecps.v4f32, %d, %x1)
  %x2 = fmul <WIDTH x float> %x1, %x1_nr
  ret <WIDTH x float> %x2
}

declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone

;; Approximate 1/sqrt(d): hardware estimate (vrsqrte) refined with two
;; Newton-Raphson steps; each step feeds d * x^2 into vrsqrts.
define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
  unary4to16(x0, float, @llvm.arm.neon.vrsqrte.v4f32, %d)
  %x0_2 = fmul <WIDTH x float> %x0, %x0
  binary4to16(x0_nr, float, @llvm.arm.neon.vrsqrts.v4f32, %d, %x0_2)
  %x1 = fmul <WIDTH x float> %x0, %x0_nr
  %x1_2 = fmul <WIDTH x float> %x1, %x1
  binary4to16(x1_nr, float, @llvm.arm.neon.vrsqrts.v4f32, %d, %x1_2)
  %x2 = fmul <WIDTH x float> %x1, %x1_nr
  ret <WIDTH x float> %x2
}
|
||||
|
||||
;; Scalar rsqrt: broadcast the input into lane 0 of a 16-wide vector,
;; run the varying version, and extract lane 0.
define float @__rsqrt_uniform_float(float) nounwind readnone alwaysinline {
  %v1 = bitcast float %0 to <1 x float>
  %vs = shufflevector <1 x float> %v1, <1 x float> undef,
          <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
                      i32 undef, i32 undef, i32 undef, i32 undef,
                      i32 undef, i32 undef, i32 undef, i32 undef,
                      i32 undef, i32 undef, i32 undef, i32 undef>
  %vr = call <16 x float> @__rsqrt_varying_float(<16 x float> %vs)
  %r = extractelement <16 x float> %vr, i32 0
  ret float %r
}

;; Scalar rcp via the varying implementation, same lane-0 trick as above.
define float @__rcp_uniform_float(float) nounwind readnone alwaysinline {
  %v1 = bitcast float %0 to <1 x float>
  %vs = shufflevector <1 x float> %v1, <1 x float> undef,
          <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef,
                      i32 undef, i32 undef, i32 undef, i32 undef,
                      i32 undef, i32 undef, i32 undef, i32 undef,
                      i32 undef, i32 undef, i32 undef, i32 undef>
  %vr = call <16 x float> @__rcp_varying_float(<16 x float> %vs)
  %r = extractelement <16 x float> %vr, i32 0
  ret float %r
}
|
||||
|
||||
declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)

define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone alwaysinline {
  unary4to16(result, float, @llvm.sqrt.v4f32, %0)
  ;; The rsqrt-based alternative below returns NaN for v=0, which is
  ;; undesirable, so the llvm.sqrt intrinsic is used instead.
  ;; %rsqrt = call <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %0)
  ;; %result = fmul <4 x float> %rsqrt, %0
  ret <16 x float> %result
}

declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)

define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readnone alwaysinline {
  unary4to16(r, double, @llvm.sqrt.v4f64, %0)
  ret <WIDTH x double> %r
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; reductions

;; Pack the 16 per-lane i8 masks into the low 16 bits of an i64.
;; Each lane is ANDed with its bit value (1,2,...,128, repeated for the two
;; 8-lane halves); pairwise widening adds (vpaddl) then collapse each half
;; to one byte, and the upper half's byte is shifted left 8 and ORed in.
define i64 @__movmsk(<WIDTH x MASK>) nounwind readnone alwaysinline {
  %and_mask = and <WIDTH x i8> %0,
      <i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128,
       i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128>
  %v8 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %and_mask)
  %v4 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %v8)
  %v2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %v4)
  %va = extractelement <2 x i64> %v2, i32 0
  %vb = extractelement <2 x i64> %v2, i32 1
  %vbshift = shl i64 %vb, 8
  %v = or i64 %va, %vbshift
  ret i64 %v
}
|
||||
|
||||
;; True if any lane's mask is set: repeatedly OR the two halves of the
;; vector (sign-extending to keep lane widths NEON-friendly) down to a
;; single i32, then compare against zero.
define i1 @__any(<WIDTH x MASK>) nounwind readnone alwaysinline {
  v16tov8(MASK, %0, %v8a, %v8b)
  %vor8 = or <8 x MASK> %v8a, %v8b
  %v16 = sext <8 x i8> %vor8 to <8 x i16>
  v8tov4(i16, %v16, %v16a, %v16b)
  %vor16 = or <4 x i16> %v16a, %v16b
  %v32 = sext <4 x i16> %vor16 to <4 x i32>
  v4tov2(i32, %v32, %v32a, %v32b)
  %vor32 = or <2 x i32> %v32a, %v32b
  %v0 = extractelement <2 x i32> %vor32, i32 0
  %v1 = extractelement <2 x i32> %vor32, i32 1
  %v = or i32 %v0, %v1
  %cmp = icmp ne i32 %v, 0
  ret i1 %cmp
}

;; True if every lane's mask is set: same halving scheme as __any, but
;; combining with AND instead of OR.
define i1 @__all(<WIDTH x MASK>) nounwind readnone alwaysinline {
  v16tov8(MASK, %0, %v8a, %v8b)
  %vand8 = and <8 x MASK> %v8a, %v8b
  %v16 = sext <8 x i8> %vand8 to <8 x i16>
  v8tov4(i16, %v16, %v16a, %v16b)
  %vand16 = and <4 x i16> %v16a, %v16b
  %v32 = sext <4 x i16> %vand16 to <4 x i32>
  v4tov2(i32, %v32, %v32a, %v32b)
  %vand32 = and <2 x i32> %v32a, %v32b
  %v0 = extractelement <2 x i32> %vand32, i32 0
  %v1 = extractelement <2 x i32> %vand32, i32 1
  %v = and i32 %v0, %v1
  %cmp = icmp ne i32 %v, 0
  ret i1 %cmp
}

;; True when no lane is set: simply the negation of __any.
define i1 @__none(<WIDTH x MASK>) nounwind readnone alwaysinline {
  %any = call i1 @__any(<WIDTH x MASK> %0)
  %none = icmp eq i1 %any, 0
  ret i1 %none
}
|
||||
|
||||
;; Emit the body of a 16-wide horizontal reduction: 16 -> 8 -> 4 lanes
;; with the full-width reduce function, then a 2-lane pairwise NEON op,
;; then a scalar combine of the final two values.
;; $1: scalar type
;; $2: vector/vector reduce function (2 x <WIDTH x vec> -> <WIDTH x vec>)
;; $3: pairwise vector reduce function (2 x <2 x vec> -> <2 x vec>)
;; $4: scalar reduce function
define(`neon_reduce', `
  ;; split into two 8-wide halves, widened back to 16 lanes (upper 8 undef)
  ;; so they can be fed to the 16-wide reduce function $2
  v16tov8($1, %0, %va, %vb)
  %va_16 = shufflevector <8 x $1> %va, <8 x $1> undef,
      <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                  i32 undef, i32 undef, i32 undef, i32 undef,
                  i32 undef, i32 undef, i32 undef, i32 undef>
  %vb_16 = shufflevector <8 x $1> %vb, <8 x $1> undef,
      <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                  i32 undef, i32 undef, i32 undef, i32 undef,
                  i32 undef, i32 undef, i32 undef, i32 undef>
  %v8 = call <16 x $1> $2(<16 x $1> %va_16, <16 x $1> %vb_16)

  ;; 8 meaningful lanes remain; split into two groups of 4 and reduce again
  %v8a = shufflevector <16 x $1> %v8, <16 x $1> undef,
      <16 x i32> <i32 0, i32 1, i32 2, i32 3,
                  i32 undef, i32 undef, i32 undef, i32 undef,
                  i32 undef, i32 undef, i32 undef, i32 undef,
                  i32 undef, i32 undef, i32 undef, i32 undef>
  %v8b = shufflevector <16 x $1> %v8, <16 x $1> undef,
      <16 x i32> <i32 4, i32 5, i32 6, i32 7,
                  i32 undef, i32 undef, i32 undef, i32 undef,
                  i32 undef, i32 undef, i32 undef, i32 undef,
                  i32 undef, i32 undef, i32 undef, i32 undef>

  %v4 = call <16 x $1> $2(<16 x $1> %v8a, <16 x $1> %v8b)

  ;; 4 lanes left: one 2-lane pairwise op ($3), then the scalar combine ($4)
  %vfirst_4 = shufflevector <16 x $1> %v4, <16 x $1> undef,
      <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  v4tov2($1, %vfirst_4, %v0, %v1)
  %vh = call <2 x $1> $3(<2 x $1> %v0, <2 x $1> %v1)
  %vh0 = extractelement <2 x $1> %vh, i32 0
  %vh1 = extractelement <2 x $1> %vh, i32 1
  %r = call $1 $4($1 %vh0, $1 %vh1)
  ret $1 %r
')
|
||||
|
||||
declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone

;; scalar combiner for the add reduction
define internal float @add_f32(float, float) nounwind readnone alwaysinline {
  %r = fadd float %0, %1
  ret float %r
}

;; vector combiner for the add reduction
define internal <WIDTH x float> @__add_varying_float(<WIDTH x float>, <WIDTH x float>) nounwind readnone alwaysinline {
  %r = fadd <WIDTH x float> %0, %1
  ret <WIDTH x float> %r
}

define float @__reduce_add_float(<WIDTH x float>) nounwind readnone alwaysinline {
  neon_reduce(float, @__add_varying_float, @llvm.arm.neon.vpadd.v2f32, @add_f32)
}

declare <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float>, <2 x float>) nounwind readnone

define internal float @min_f32(float, float) nounwind readnone alwaysinline {
  %cmp = fcmp olt float %0, %1
  %r = select i1 %cmp, float %0, float %1
  ret float %r
}

define float @__reduce_min_float(<WIDTH x float>) nounwind readnone alwaysinline {
  neon_reduce(float, @__min_varying_float, @llvm.arm.neon.vpmins.v2f32, @min_f32)
}

declare <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone

;; NOTE(review): min_f32 uses an ordered compare (olt) but max_f32 uses an
;; unordered one (ugt), so the two handle NaN lanes differently — confirm
;; whether this asymmetry is intentional.
define internal float @max_f32(float, float) nounwind readnone alwaysinline {
  %cmp = fcmp ugt float %0, %1
  %r = select i1 %cmp, float %0, float %1
  ret float %r
}

define float @__reduce_max_float(<WIDTH x float>) nounwind readnone alwaysinline {
  neon_reduce(float, @__max_varying_float, @llvm.arm.neon.vpmaxs.v2f32, @max_f32)
}
|
||||
|
||||
;; Pairwise widening adds: each step halves the lane count while doubling
;; the lane width, so integer sums never overflow an intermediate type.
declare <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone

;; Sum 16 i8 lanes into an i64: widen 16x8 -> 8x16 -> 4x32 -> 2x64, then
;; add the final two lanes.
define i64 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone alwaysinline {
  %a16 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %0)
  %a32 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %a16)
  %a64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %a32)
  %a0 = extractelement <2 x i64> %a64, i32 0
  %a1 = extractelement <2 x i64> %a64, i32 1
  %r = add i64 %a0, %a1
  ret i64 %r
}

;; Sum 16 i16 lanes into an i64: the vector is split into two 8-wide
;; halves, each widened independently, then the halves are added.
define i64 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone alwaysinline {
  v16tov8(i16, %0, %va, %vb)
  %a32 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %va)
  %b32 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %vb)
  %a64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %a32)
  %b64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %b32)
  %sum = add <2 x i64> %a64, %b64
  %a0 = extractelement <2 x i64> %sum, i32 0
  %a1 = extractelement <2 x i64> %sum, i32 1
  %r = add i64 %a0, %a1
  ret i64 %r
}

;; Sum 16 i32 lanes into an i64: four 4-wide quarters widened to 2 x i64
;; each, then a small add tree.
define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
  v16tov4(i32, %0, %va, %vb, %vc, %vd)
  %a64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %va)
  %b64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %vb)
  %c64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %vc)
  %d64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %vd)
  %ab = add <2 x i64> %a64, %b64
  %cd = add <2 x i64> %c64, %d64
  %sum = add <2 x i64> %ab, %cd
  %a0 = extractelement <2 x i64> %sum, i32 0
  %a1 = extractelement <2 x i64> %sum, i32 1
  %r = add i64 %a0, %a1
  ret i64 %r
}
|
||||
|
||||
declare <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

;; scalar signed-min combiner
define internal i32 @min_si32(i32, i32) nounwind readnone alwaysinline {
  %cmp = icmp slt i32 %0, %1
  %r = select i1 %cmp, i32 %0, i32 %1
  ret i32 %r
}

define i32 @__reduce_min_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
  neon_reduce(i32, @__min_varying_int32, @llvm.arm.neon.vpmins.v2i32, @min_si32)
}

declare <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

;; scalar signed-max combiner
define internal i32 @max_si32(i32, i32) nounwind readnone alwaysinline {
  %cmp = icmp sgt i32 %0, %1
  %r = select i1 %cmp, i32 %0, i32 %1
  ret i32 %r
}

define i32 @__reduce_max_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
  neon_reduce(i32, @__max_varying_int32, @llvm.arm.neon.vpmaxs.v2i32, @max_si32)
}
|
||||
|
||||
declare <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

;; scalar unsigned-min combiner
define internal i32 @min_ui32(i32, i32) nounwind readnone alwaysinline {
  %cmp = icmp ult i32 %0, %1
  %r = select i1 %cmp, i32 %0, i32 %1
  ret i32 %r
}

;; BUGFIX: the pairwise step previously used the *signed* vpmins.v2i32,
;; which gives wrong answers for lanes >= 0x80000000; it must be the
;; unsigned vpminu.v2i32 (declared above) to match the unsigned varying
;; and scalar combiners.
define i32 @__reduce_min_uint32(<WIDTH x i32>) nounwind readnone alwaysinline {
  neon_reduce(i32, @__min_varying_uint32, @llvm.arm.neon.vpminu.v2i32, @min_ui32)
}

declare <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

;; scalar unsigned-max combiner
define internal i32 @max_ui32(i32, i32) nounwind readnone alwaysinline {
  %cmp = icmp ugt i32 %0, %1
  %r = select i1 %cmp, i32 %0, i32 %1
  ret i32 %r
}

;; BUGFIX: vpmaxs.v2i32 -> vpmaxu.v2i32 for the same signedness reason.
define i32 @__reduce_max_uint32(<WIDTH x i32>) nounwind readnone alwaysinline {
  neon_reduce(i32, @__max_varying_uint32, @llvm.arm.neon.vpmaxu.v2i32, @max_ui32)
}
|
||||
|
||||
;; double and int64 reductions have no NEON pairwise ops here; they use
;; the generic reduce16() helper with varying + uniform combiner pairs.
;; (The min/max varying/uniform combiners are provided by the included
;; common/util code.)

define internal double @__add_uniform_double(double, double) nounwind readnone alwaysinline {
  %r = fadd double %0, %1
  ret double %r
}

define internal <WIDTH x double> @__add_varying_double(<WIDTH x double>, <WIDTH x double>) nounwind readnone alwaysinline {
  %r = fadd <WIDTH x double> %0, %1
  ret <WIDTH x double> %r
}

define double @__reduce_add_double(<WIDTH x double>) nounwind readnone alwaysinline {
  reduce16(double, @__add_varying_double, @__add_uniform_double)
}

define double @__reduce_min_double(<WIDTH x double>) nounwind readnone alwaysinline {
  reduce16(double, @__min_varying_double, @__min_uniform_double)
}

define double @__reduce_max_double(<WIDTH x double>) nounwind readnone alwaysinline {
  reduce16(double, @__max_varying_double, @__max_uniform_double)
}

define internal i64 @__add_uniform_int64(i64, i64) nounwind readnone alwaysinline {
  %r = add i64 %0, %1
  ret i64 %r
}

define internal <WIDTH x i64> @__add_varying_int64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone alwaysinline {
  %r = add <WIDTH x i64> %0, %1
  ret <WIDTH x i64> %r
}

define i64 @__reduce_add_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
  reduce16(i64, @__add_varying_int64, @__add_uniform_int64)
}

define i64 @__reduce_min_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
  reduce16(i64, @__min_varying_int64, @__min_uniform_int64)
}

define i64 @__reduce_max_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
  reduce16(i64, @__max_varying_int64, @__max_uniform_int64)
}

define i64 @__reduce_min_uint64(<WIDTH x i64>) nounwind readnone alwaysinline {
  reduce16(i64, @__min_varying_uint64, @__min_uniform_uint64)
}

define i64 @__reduce_max_uint64(<WIDTH x i64>) nounwind readnone alwaysinline {
  reduce16(i64, @__max_varying_uint64, @__max_uniform_uint64)
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; int8/int16 builtins
;;
;; 8-bit averages map directly onto the 16-lane NEON halving-add
;; instructions: vrhadd rounds the halved sum up ("avg up"), vhadd
;; truncates ("avg down").

declare <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone

define <16 x i8> @__avg_up_uint8(<16 x i8>, <16 x i8>) nounwind readnone alwaysinline {
  %r = call <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8> %0, <16 x i8> %1)
  ret <16 x i8> %r
}

declare <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone

define <16 x i8> @__avg_up_int8(<16 x i8>, <16 x i8>) nounwind readnone alwaysinline {
  %r = call <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8> %0, <16 x i8> %1)
  ret <16 x i8> %r
}

declare <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone

define <16 x i8> @__avg_down_uint8(<16 x i8>, <16 x i8>) nounwind readnone alwaysinline {
  %r = call <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8> %0, <16 x i8> %1)
  ret <16 x i8> %r
}

declare <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone

define <16 x i8> @__avg_down_int8(<16 x i8>, <16 x i8>) nounwind readnone alwaysinline {
  %r = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %0, <16 x i8> %1)
  ret <16 x i8> %r
}
|
||||
|
||||
;; 16-bit averages: NEON's halving adds are at most 8 lanes wide for i16,
;; so each 16-wide operand is split in half, averaged, and reassembled.

declare <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone

define <16 x i16> @__avg_up_uint16(<16 x i16>, <16 x i16>) nounwind readnone alwaysinline {
  v16tov8(i16, %0, %a0, %b0)
  v16tov8(i16, %1, %a1, %b1)
  %r0 = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %a0, <8 x i16> %a1)
  %r1 = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %b0, <8 x i16> %b1)
  v8tov16(i16, %r0, %r1, %r)
  ret <16 x i16> %r
}

declare <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone

define <16 x i16> @__avg_up_int16(<16 x i16>, <16 x i16>) nounwind readnone alwaysinline {
  v16tov8(i16, %0, %a0, %b0)
  v16tov8(i16, %1, %a1, %b1)
  %r0 = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %a0, <8 x i16> %a1)
  %r1 = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %b0, <8 x i16> %b1)
  v8tov16(i16, %r0, %r1, %r)
  ret <16 x i16> %r
}

declare <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone

define <16 x i16> @__avg_down_uint16(<16 x i16>, <16 x i16>) nounwind readnone alwaysinline {
  v16tov8(i16, %0, %a0, %b0)
  v16tov8(i16, %1, %a1, %b1)
  %r0 = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %a0, <8 x i16> %a1)
  %r1 = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %b0, <8 x i16> %b1)
  v8tov16(i16, %r0, %r1, %r)
  ret <16 x i16> %r
}

declare <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone

define <16 x i16> @__avg_down_int16(<16 x i16>, <16 x i16>) nounwind readnone alwaysinline {
  v16tov8(i16, %0, %a0, %b0)
  v16tov8(i16, %1, %a1, %b1)
  %r0 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %a0, <8 x i16> %a1)
  %r1 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %b0, <8 x i16> %b1)
  v8tov16(i16, %r0, %r1, %r)
  ret <16 x i16> %r
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; reciprocals in double precision, if supported

;; m4 macros (from util.m4) that emit declarations for double-precision
;; rsqrt/rcp, transcendentals, trigonometry and saturating arithmetic.
rsqrtd_decl()
rcpd_decl()

transcendetals_decl()
trigonometry_decl()
saturation_arithmetic()
|
||||
354
builtins/target-neon-common.ll
Normal file
354
builtins/target-neon-common.ll
Normal file
@@ -0,0 +1,354 @@
|
||||
;;
|
||||
;; target-neon-common.ll
|
||||
;;
|
||||
;; Copyright(c) 2013-2015 Google, Inc.
|
||||
;;
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Matt Pharr nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
target datalayout = "e-p:32:32:32-S32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f16:16:16-f32:32:32-f64:32:64-f128:128:128-v64:32:64-v128:32:128-a0:0:64-n32"
|
||||
|
||||
stdlib_core()
|
||||
scans()
|
||||
reduce_equal(WIDTH)
|
||||
rdrand_decls()
|
||||
define_shuffles()
|
||||
aossoa()
|
||||
ctlztz()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
declare <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16>) nounwind readnone
|
||||
|
||||
define float @__half_to_float_uniform(i16 %v) nounwind readnone alwaysinline {
|
||||
%v1 = bitcast i16 %v to <1 x i16>
|
||||
%vec = shufflevector <1 x i16> %v1, <1 x i16> undef,
|
||||
<4 x i32> <i32 0, i32 0, i32 0, i32 0>
|
||||
%h = call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %vec)
|
||||
%r = extractelement <4 x float> %h, i32 0
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define i16 @__float_to_half_uniform(float %v) nounwind readnone alwaysinline {
|
||||
%v1 = bitcast float %v to <1 x float>
|
||||
%vec = shufflevector <1 x float> %v1, <1 x float> undef,
|
||||
<4 x i32> <i32 0, i32 0, i32 0, i32 0>
|
||||
%h = call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %vec)
|
||||
%r = extractelement <4 x i16> %h, i32 0
|
||||
ret i16 %r
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; math
|
||||
|
||||
declare i32 @llvm.arm.get.fpscr() nounwind
|
||||
declare void @llvm.arm.set.fpscr(i32) nounwind
|
||||
|
||||
define void @__fastmath() nounwind alwaysinline {
|
||||
%x = call i32 @llvm.arm.get.fpscr()
|
||||
; Turn on FTZ (bit 24) and default NaN (bit 25)
|
||||
%y = or i32 %x, 50331648
|
||||
call void @llvm.arm.set.fpscr(i32 %y)
|
||||
ret void
|
||||
}
|
||||
|
||||
;; round/floor/ceil
|
||||
|
||||
;; FIXME: grabbed these from the sse2 target, which does not have native
|
||||
;; instructions for these. Is there a better approach for NEON?
|
||||
|
||||
define float @__round_uniform_float(float) nounwind readonly alwaysinline {
|
||||
%float_to_int_bitcast.i.i.i.i = bitcast float %0 to i32
|
||||
%bitop.i.i = and i32 %float_to_int_bitcast.i.i.i.i, -2147483648
|
||||
%bitop.i = xor i32 %bitop.i.i, %float_to_int_bitcast.i.i.i.i
|
||||
%int_to_float_bitcast.i.i40.i = bitcast i32 %bitop.i to float
|
||||
%binop.i = fadd float %int_to_float_bitcast.i.i40.i, 8.388608e+06
|
||||
%binop21.i = fadd float %binop.i, -8.388608e+06
|
||||
%float_to_int_bitcast.i.i.i = bitcast float %binop21.i to i32
|
||||
%bitop31.i = xor i32 %float_to_int_bitcast.i.i.i, %bitop.i.i
|
||||
%int_to_float_bitcast.i.i.i = bitcast i32 %bitop31.i to float
|
||||
ret float %int_to_float_bitcast.i.i.i
|
||||
}
|
||||
|
||||
define float @__floor_uniform_float(float) nounwind readonly alwaysinline {
|
||||
%calltmp.i = tail call float @__round_uniform_float(float %0) nounwind
|
||||
%bincmp.i = fcmp ogt float %calltmp.i, %0
|
||||
%selectexpr.i = sext i1 %bincmp.i to i32
|
||||
%bitop.i = and i32 %selectexpr.i, -1082130432
|
||||
%int_to_float_bitcast.i.i.i = bitcast i32 %bitop.i to float
|
||||
%binop.i = fadd float %calltmp.i, %int_to_float_bitcast.i.i.i
|
||||
ret float %binop.i
|
||||
}
|
||||
|
||||
define float @__ceil_uniform_float(float) nounwind readonly alwaysinline {
|
||||
%calltmp.i = tail call float @__round_uniform_float(float %0) nounwind
|
||||
%bincmp.i = fcmp olt float %calltmp.i, %0
|
||||
%selectexpr.i = sext i1 %bincmp.i to i32
|
||||
%bitop.i = and i32 %selectexpr.i, 1065353216
|
||||
%int_to_float_bitcast.i.i.i = bitcast i32 %bitop.i to float
|
||||
%binop.i = fadd float %calltmp.i, %int_to_float_bitcast.i.i.i
|
||||
ret float %binop.i
|
||||
}
|
||||
|
||||
;; FIXME: rounding doubles and double vectors needs to be implemented
|
||||
declare double @__round_uniform_double(double) nounwind readnone
|
||||
declare double @__floor_uniform_double(double) nounwind readnone
|
||||
declare double @__ceil_uniform_double(double) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; min/max
|
||||
|
||||
define float @__max_uniform_float(float, float) nounwind readnone alwaysinline {
|
||||
%cmp = fcmp ugt float %0, %1
|
||||
%r = select i1 %cmp, float %0, float %1
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define float @__min_uniform_float(float, float) nounwind readnone alwaysinline {
|
||||
%cmp = fcmp ult float %0, %1
|
||||
%r = select i1 %cmp, float %0, float %1
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define i32 @__min_uniform_int32(i32, i32) nounwind readnone alwaysinline {
|
||||
%cmp = icmp slt i32 %0, %1
|
||||
%r = select i1 %cmp, i32 %0, i32 %1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__max_uniform_int32(i32, i32) nounwind readnone alwaysinline {
|
||||
%cmp = icmp sgt i32 %0, %1
|
||||
%r = select i1 %cmp, i32 %0, i32 %1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__min_uniform_uint32(i32, i32) nounwind readnone alwaysinline {
|
||||
%cmp = icmp ult i32 %0, %1
|
||||
%r = select i1 %cmp, i32 %0, i32 %1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__max_uniform_uint32(i32, i32) nounwind readnone alwaysinline {
|
||||
%cmp = icmp ugt i32 %0, %1
|
||||
%r = select i1 %cmp, i32 %0, i32 %1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i64 @__min_uniform_int64(i64, i64) nounwind readnone alwaysinline {
|
||||
%cmp = icmp slt i64 %0, %1
|
||||
%r = select i1 %cmp, i64 %0, i64 %1
|
||||
ret i64 %r
|
||||
}
|
||||
|
||||
define i64 @__max_uniform_int64(i64, i64) nounwind readnone alwaysinline {
|
||||
%cmp = icmp sgt i64 %0, %1
|
||||
%r = select i1 %cmp, i64 %0, i64 %1
|
||||
ret i64 %r
|
||||
}
|
||||
|
||||
define i64 @__min_uniform_uint64(i64, i64) nounwind readnone alwaysinline {
|
||||
%cmp = icmp ult i64 %0, %1
|
||||
%r = select i1 %cmp, i64 %0, i64 %1
|
||||
ret i64 %r
|
||||
}
|
||||
|
||||
define i64 @__max_uniform_uint64(i64, i64) nounwind readnone alwaysinline {
|
||||
%cmp = icmp ugt i64 %0, %1
|
||||
%r = select i1 %cmp, i64 %0, i64 %1
|
||||
ret i64 %r
|
||||
}
|
||||
|
||||
define double @__min_uniform_double(double, double) nounwind readnone alwaysinline {
|
||||
%cmp = fcmp olt double %0, %1
|
||||
%r = select i1 %cmp, double %0, double %1
|
||||
ret double %r
|
||||
}
|
||||
|
||||
define double @__max_uniform_double(double, double) nounwind readnone alwaysinline {
|
||||
%cmp = fcmp ogt double %0, %1
|
||||
%r = select i1 %cmp, double %0, double %1
|
||||
ret double %r
|
||||
}
|
||||
|
||||
define <WIDTH x i64> @__min_varying_int64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone alwaysinline {
|
||||
%m = icmp slt <WIDTH x i64> %0, %1
|
||||
%r = select <WIDTH x i1> %m, <WIDTH x i64> %0, <WIDTH x i64> %1
|
||||
ret <WIDTH x i64> %r
|
||||
}
|
||||
|
||||
define <WIDTH x i64> @__max_varying_int64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone alwaysinline {
|
||||
%m = icmp sgt <WIDTH x i64> %0, %1
|
||||
%r = select <WIDTH x i1> %m, <WIDTH x i64> %0, <WIDTH x i64> %1
|
||||
ret <WIDTH x i64> %r
|
||||
}
|
||||
|
||||
define <WIDTH x i64> @__min_varying_uint64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone alwaysinline {
|
||||
%m = icmp ult <WIDTH x i64> %0, %1
|
||||
%r = select <WIDTH x i1> %m, <WIDTH x i64> %0, <WIDTH x i64> %1
|
||||
ret <WIDTH x i64> %r
|
||||
}
|
||||
|
||||
define <WIDTH x i64> @__max_varying_uint64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone alwaysinline {
|
||||
%m = icmp ugt <WIDTH x i64> %0, %1
|
||||
%r = select <WIDTH x i1> %m, <WIDTH x i64> %0, <WIDTH x i64> %1
|
||||
ret <WIDTH x i64> %r
|
||||
}
|
||||
|
||||
define <WIDTH x double> @__min_varying_double(<WIDTH x double>,
|
||||
<WIDTH x double>) nounwind readnone alwaysinline {
|
||||
%m = fcmp olt <WIDTH x double> %0, %1
|
||||
%r = select <WIDTH x i1> %m, <WIDTH x double> %0, <WIDTH x double> %1
|
||||
ret <WIDTH x double> %r
|
||||
}
|
||||
|
||||
define <WIDTH x double> @__max_varying_double(<WIDTH x double>,
|
||||
<WIDTH x double>) nounwind readnone alwaysinline {
|
||||
%m = fcmp ogt <WIDTH x double> %0, %1
|
||||
%r = select <WIDTH x i1> %m, <WIDTH x double> %0, <WIDTH x double> %1
|
||||
ret <WIDTH x double> %r
|
||||
}
|
||||
|
||||
;; sqrt/rsqrt/rcp
|
||||
|
||||
declare float @llvm.sqrt.f32(float)
|
||||
|
||||
define float @__sqrt_uniform_float(float) nounwind readnone alwaysinline {
|
||||
%r = call float @llvm.sqrt.f32(float %0)
|
||||
ret float %r
|
||||
}
|
||||
|
||||
declare double @llvm.sqrt.f64(double)
|
||||
|
||||
define double @__sqrt_uniform_double(double) nounwind readnone alwaysinline {
|
||||
%r = call double @llvm.sqrt.f64(double %0)
|
||||
ret double %r
|
||||
}
|
||||
|
||||
;; bit ops
|
||||
|
||||
declare i32 @llvm.ctpop.i32(i32) nounwind readnone
|
||||
declare i64 @llvm.ctpop.i64(i64) nounwind readnone
|
||||
|
||||
define i32 @__popcnt_int32(i32) nounwind readnone alwaysinline {
|
||||
%v = call i32 @llvm.ctpop.i32(i32 %0)
|
||||
ret i32 %v
|
||||
}
|
||||
|
||||
define i64 @__popcnt_int64(i64) nounwind readnone alwaysinline {
|
||||
%v = call i64 @llvm.ctpop.i64(i64 %0)
|
||||
ret i64 %v
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
masked_load(i8, 1)
|
||||
masked_load(i16, 2)
|
||||
masked_load(i32, 4)
|
||||
masked_load(float, 4)
|
||||
masked_load(i64, 8)
|
||||
masked_load(double, 8)
|
||||
|
||||
gen_masked_store(i8)
|
||||
gen_masked_store(i16)
|
||||
gen_masked_store(i32)
|
||||
gen_masked_store(i64)
|
||||
masked_store_float_double()
|
||||
|
||||
define void @__masked_store_blend_i8(<WIDTH x i8>* nocapture %ptr, <WIDTH x i8> %new,
|
||||
<WIDTH x MASK> %mask) nounwind alwaysinline {
|
||||
%old = load PTR_OP_ARGS(`<WIDTH x i8> ') %ptr
|
||||
%mask1 = trunc <WIDTH x MASK> %mask to <WIDTH x i1>
|
||||
%result = select <WIDTH x i1> %mask1, <WIDTH x i8> %new, <WIDTH x i8> %old
|
||||
store <WIDTH x i8> %result, <WIDTH x i8> * %ptr
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_i16(<WIDTH x i16>* nocapture %ptr, <WIDTH x i16> %new,
|
||||
<WIDTH x MASK> %mask) nounwind alwaysinline {
|
||||
%old = load PTR_OP_ARGS(`<WIDTH x i16> ') %ptr
|
||||
%mask1 = trunc <WIDTH x MASK> %mask to <WIDTH x i1>
|
||||
%result = select <WIDTH x i1> %mask1, <WIDTH x i16> %new, <WIDTH x i16> %old
|
||||
store <WIDTH x i16> %result, <WIDTH x i16> * %ptr
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_i32(<WIDTH x i32>* nocapture %ptr, <WIDTH x i32> %new,
|
||||
<WIDTH x MASK> %mask) nounwind alwaysinline {
|
||||
%old = load PTR_OP_ARGS(`<WIDTH x i32> ') %ptr
|
||||
%mask1 = trunc <WIDTH x MASK> %mask to <WIDTH x i1>
|
||||
%result = select <WIDTH x i1> %mask1, <WIDTH x i32> %new, <WIDTH x i32> %old
|
||||
store <WIDTH x i32> %result, <WIDTH x i32> * %ptr
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_i64(<WIDTH x i64>* nocapture %ptr,
|
||||
<WIDTH x i64> %new, <WIDTH x MASK> %mask) nounwind alwaysinline {
|
||||
%old = load PTR_OP_ARGS(`<WIDTH x i64> ') %ptr
|
||||
%mask1 = trunc <WIDTH x MASK> %mask to <WIDTH x i1>
|
||||
%result = select <WIDTH x i1> %mask1, <WIDTH x i64> %new, <WIDTH x i64> %old
|
||||
store <WIDTH x i64> %result, <WIDTH x i64> * %ptr
|
||||
ret void
|
||||
}
|
||||
|
||||
;; yuck. We need declarations of these, even though we shouldnt ever
|
||||
;; actually generate calls to them for the NEON target...
|
||||
|
||||
|
||||
include(`svml.m4')
|
||||
svml_stubs(float,f,WIDTH)
|
||||
svml_stubs(double,d,WIDTH)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather
|
||||
|
||||
gen_gather_factored(i8)
|
||||
gen_gather_factored(i16)
|
||||
gen_gather_factored(i32)
|
||||
gen_gather_factored(float)
|
||||
gen_gather_factored(i64)
|
||||
gen_gather_factored(double)
|
||||
|
||||
gen_scatter(i8)
|
||||
gen_scatter(i16)
|
||||
gen_scatter(i32)
|
||||
gen_scatter(float)
|
||||
gen_scatter(i64)
|
||||
gen_scatter(double)
|
||||
|
||||
packed_load_and_store(4)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; prefetch
|
||||
|
||||
define_prefetches()
|
||||
declare_nvptx()
|
||||
2371
builtins/target-nvptx.ll
Normal file
2371
builtins/target-nvptx.ll
Normal file
File diff suppressed because it is too large
Load Diff
94
builtins/target-skx.ll
Normal file
94
builtins/target-skx.ll
Normal file
@@ -0,0 +1,94 @@
|
||||
;; Copyright (c) 2016, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Intel Corporation nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
define(`WIDTH',`16')
|
||||
|
||||
|
||||
ifelse(LLVM_VERSION, LLVM_3_8,
|
||||
`include(`target-avx512-common.ll')',
|
||||
LLVM_VERSION, LLVM_3_9,
|
||||
`include(`target-avx512-common.ll')',
|
||||
LLVM_VERSION, LLVM_4_0,
|
||||
`include(`target-avx512-common.ll')',
|
||||
LLVM_VERSION, LLVM_5_0,
|
||||
`include(`target-avx512-common.ll')'
|
||||
)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp, rsqrt
|
||||
|
||||
define(`rcp_rsqrt_varying_float_skx',`
|
||||
declare <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float>, <16 x float>, i16) nounwind readnone
|
||||
define <16 x float> @__rcp_varying_float(<16 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float> %0, <16 x float> undef, i16 -1)
|
||||
;; do one Newton-Raphson iteration to improve precision
|
||||
;; float iv = __rcp_v(v);
|
||||
;; return iv * (2. - v * iv);
|
||||
%v_iv = fmul <16 x float> %0`,' %call
|
||||
%two_minus = fsub <16 x float> <float 2.`,' float 2.`,' float 2.`,' float 2.`,'
|
||||
float 2.`,' float 2.`,' float 2.`,' float 2.`,'
|
||||
float 2.`,' float 2.`,' float 2.`,' float 2.`,'
|
||||
float 2.`,' float 2.`,' float 2.`,' float 2.>`,' %v_iv
|
||||
%iv_mul = fmul <16 x float> %call`,' %two_minus
|
||||
ret <16 x float> %iv_mul
|
||||
}
|
||||
declare <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float>`,' <16 x float>`,' i16) nounwind readnone
|
||||
define <16 x float> @__rsqrt_varying_float(<16 x float> %v) nounwind readonly alwaysinline {
|
||||
%is = call <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float> %v`,' <16 x float> undef`,' i16 -1)
|
||||
; Newton-Raphson iteration to improve precision
|
||||
; float is = __rsqrt_v(v);
|
||||
; return 0.5 * is * (3. - (v * is) * is);
|
||||
%v_is = fmul <16 x float> %v`,' %is
|
||||
%v_is_is = fmul <16 x float> %v_is`,' %is
|
||||
%three_sub = fsub <16 x float> <float 3.`,' float 3.`,' float 3.`,' float 3.`,'
|
||||
float 3.`,' float 3.`,' float 3.`,' float 3.`,'
|
||||
float 3.`,' float 3.`,' float 3.`,' float 3.`,'
|
||||
float 3.`,' float 3.`,' float 3.`,' float 3.>`,' %v_is_is
|
||||
%is_mul = fmul <16 x float> %is`,' %three_sub
|
||||
%half_scale = fmul <16 x float> <float 0.5`,' float 0.5`,' float 0.5`,' float 0.5`,'
|
||||
float 0.5`,' float 0.5`,' float 0.5`,' float 0.5`,'
|
||||
float 0.5`,' float 0.5`,' float 0.5`,' float 0.5`,'
|
||||
float 0.5`,' float 0.5`,' float 0.5`,' float 0.5>`,' %is_mul
|
||||
ret <16 x float> %half_scale
|
||||
}
|
||||
')
|
||||
|
||||
ifelse(LLVM_VERSION, LLVM_3_8,
|
||||
rcp_rsqrt_varying_float_skx(),
|
||||
LLVM_VERSION, LLVM_3_9,
|
||||
rcp_rsqrt_varying_float_skx(),
|
||||
LLVM_VERSION, LLVM_4_0,
|
||||
rcp_rsqrt_varying_float_skx(),
|
||||
LLVM_VERSION, LLVM_5_0,
|
||||
rcp_rsqrt_varying_float_skx()
|
||||
)
|
||||
|
||||
;;saturation_arithmetic_novec()
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -33,6 +33,7 @@ ctlztz()
|
||||
define_prefetches()
|
||||
define_shuffles()
|
||||
aossoa()
|
||||
rdrand_decls()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp
|
||||
@@ -96,7 +97,7 @@ define void @__fastmath() nounwind alwaysinline {
|
||||
%ptr = alloca i32
|
||||
%ptr8 = bitcast i32 * %ptr to i8 *
|
||||
call void @llvm.x86.sse.stmxcsr(i8 * %ptr8)
|
||||
%oldval = load i32 *%ptr
|
||||
%oldval = load PTR_OP_ARGS(`i32 ') %ptr
|
||||
|
||||
; turn on DAZ (64)/FTZ (32768) -> 32832
|
||||
%update = or i32 %oldval, 32832
|
||||
@@ -268,4 +269,9 @@ define i64 @__popcnt_int64(i64) nounwind readnone alwaysinline {
|
||||
ret i64 %val
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int8/int16 builtins
|
||||
|
||||
define_avgs()
|
||||
|
||||
declare_nvptx()
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -44,9 +44,18 @@ stdlib_core()
|
||||
packed_load_and_store()
|
||||
scans()
|
||||
int64minmax()
|
||||
saturation_arithmetic()
|
||||
|
||||
include(`target-sse2-common.ll')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
declare float @__half_to_float_uniform(i16 %v) nounwind readnone
|
||||
declare <WIDTH x float> @__half_to_float_varying(<WIDTH x i16> %v) nounwind readnone
|
||||
declare i16 @__float_to_half_uniform(float %v) nounwind readnone
|
||||
declare <WIDTH x i16> @__float_to_half_varying(<WIDTH x float> %v) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp
|
||||
|
||||
@@ -97,87 +106,14 @@ define <8 x float> @__sqrt_varying_float(<8 x float>) nounwind readonly alwaysin
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; svml stuff
|
||||
|
||||
declare <4 x float> @__svml_sinf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_cosf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_sincosf4(<4 x float> *, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_tanf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_atanf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_atan2f4(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_expf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_logf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_powf4(<4 x float>, <4 x float>) nounwind readnone
|
||||
include(`svml.m4')
|
||||
;; single precision
|
||||
svml_declare(float,f4,4)
|
||||
svml_define_x(float,f4,4,f,8)
|
||||
|
||||
|
||||
define <8 x float> @__svml_sin(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_sinf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_cos(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_cosf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define void @__svml_sincos(<8 x float>, <8 x float> *,
|
||||
<8 x float> *) nounwind readnone alwaysinline {
|
||||
; call svml_sincosf4 two times with the two 4-wide sub-vectors
|
||||
%a = shufflevector <8 x float> %0, <8 x float> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%b = shufflevector <8 x float> %0, <8 x float> undef,
|
||||
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
|
||||
%cospa = alloca <4 x float>
|
||||
%sa = call <4 x float> @__svml_sincosf4(<4 x float> * %cospa, <4 x float> %a)
|
||||
|
||||
%cospb = alloca <4 x float>
|
||||
%sb = call <4 x float> @__svml_sincosf4(<4 x float> * %cospb, <4 x float> %b)
|
||||
|
||||
%sin = shufflevector <4 x float> %sa, <4 x float> %sb,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3,
|
||||
i32 4, i32 5, i32 6, i32 7>
|
||||
store <8 x float> %sin, <8 x float> * %1
|
||||
|
||||
%cosa = load <4 x float> * %cospa
|
||||
%cosb = load <4 x float> * %cospb
|
||||
%cos = shufflevector <4 x float> %cosa, <4 x float> %cosb,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3,
|
||||
i32 4, i32 5, i32 6, i32 7>
|
||||
store <8 x float> %cos, <8 x float> * %2
|
||||
|
||||
ret void
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_tan(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_tanf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_atan(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_atanf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_atan2(<8 x float>,
|
||||
<8 x float>) nounwind readnone alwaysinline {
|
||||
binary4to8(ret, float, @__svml_atan2f4, %0, %1)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_exp(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_expf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_log(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_logf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_pow(<8 x float>,
|
||||
<8 x float>) nounwind readnone alwaysinline {
|
||||
binary4to8(ret, float, @__svml_powf4, %0, %1)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
;; double precision
|
||||
svml_declare(double,2,2)
|
||||
svml_define_x(double,2,2,d,8)
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
@@ -287,7 +223,7 @@ define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline {
|
||||
|
||||
declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define i32 @__movmsk(<8 x i32>) nounwind readnone alwaysinline {
|
||||
define i64 @__movmsk(<8 x i32>) nounwind readnone alwaysinline {
|
||||
; first do two 4-wide movmsk calls
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%m0 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
@@ -301,7 +237,92 @@ define i32 @__movmsk(<8 x i32>) nounwind readnone alwaysinline {
|
||||
; of the second one
|
||||
%v1s = shl i32 %v1, 4
|
||||
%v = or i32 %v0, %v1s
|
||||
ret i32 %v
|
||||
%v64 = zext i32 %v to i64
|
||||
ret i64 %v64
|
||||
}
|
||||
|
||||
define i1 @__any(<8 x i32>) nounwind readnone alwaysinline {
|
||||
; first do two 4-wide movmsk calls
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%m0 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%v0 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m0) nounwind readnone
|
||||
%m1 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
%v1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m1) nounwind readnone
|
||||
|
||||
; and shift the first one over by 4 before ORing it with the value
|
||||
; of the second one
|
||||
%v1s = shl i32 %v1, 4
|
||||
%v = or i32 %v0, %v1s
|
||||
%cmp = icmp ne i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__all(<8 x i32>) nounwind readnone alwaysinline {
|
||||
; first do two 4-wide movmsk calls
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%m0 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%v0 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m0) nounwind readnone
|
||||
%m1 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
%v1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m1) nounwind readnone
|
||||
|
||||
; and shift the first one over by 4 before ORing it with the value
|
||||
; of the second one
|
||||
%v1s = shl i32 %v1, 4
|
||||
%v = or i32 %v0, %v1s
|
||||
%cmp = icmp eq i32 %v, 255
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__none(<8 x i32>) nounwind readnone alwaysinline {
|
||||
; first do two 4-wide movmsk calls
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%m0 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%v0 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m0) nounwind readnone
|
||||
%m1 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
%v1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m1) nounwind readnone
|
||||
|
||||
; and shift the first one over by 4 before ORing it with the value
|
||||
; of the second one
|
||||
%v1s = shl i32 %v1, 4
|
||||
%v = or i32 %v0, %v1s
|
||||
%cmp = icmp eq i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<8 x i8>) nounwind readnone alwaysinline {
|
||||
%wide8 = shufflevector <8 x i8> %0, <8 x i8> zeroinitializer,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
|
||||
i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
|
||||
%rv = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %wide8,
|
||||
<16 x i8> zeroinitializer)
|
||||
%r0 = extractelement <2 x i64> %rv, i32 0
|
||||
%r1 = extractelement <2 x i64> %rv, i32 1
|
||||
%r = add i64 %r0, %r1
|
||||
%r16 = trunc i64 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
define internal <8 x i16> @__add_varying_i16(<8 x i16>,
|
||||
<8 x i16>) nounwind readnone alwaysinline {
|
||||
%r = add <8 x i16> %0, %1
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
define internal i16 @__add_uniform_i16(i16, i16) nounwind readnone alwaysinline {
|
||||
%r = add i16 %0, %1
|
||||
ret i16 %r
|
||||
}
|
||||
|
||||
define i16 @__reduce_add_int16(<8 x i16>) nounwind readnone alwaysinline {
|
||||
reduce8(i16, @__add_varying_i16, @__add_uniform_i16)
|
||||
}
|
||||
|
||||
define <4 x float> @__vec4_add_float(<4 x float> %v0,
|
||||
@@ -352,11 +373,6 @@ define i32 @__reduce_max_int32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__max_varying_int32, @__max_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_add_uint32(<8 x i32> %v) nounwind readnone alwaysinline {
|
||||
%r = call i32 @__reduce_add_int32(<8 x i32> %v)
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8(i32, @__min_varying_uint32, @__min_uniform_uint32)
|
||||
}
|
||||
@@ -389,7 +405,7 @@ define double @__reduce_max_double(<8 x double>) nounwind readnone {
|
||||
}
|
||||
|
||||
define <4 x i64> @__add_varying_int64(<4 x i64>,
|
||||
<4 x i64>) nounwind readnone alwaysinline {
|
||||
<4 x i64>) nounwind readnone alwaysinline {
|
||||
%r = add <4 x i64> %0, %1
|
||||
ret <4 x i64> %r
|
||||
}
|
||||
@@ -424,28 +440,30 @@ reduce_equal(8)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
load_and_broadcast(8, i8, 8)
|
||||
load_and_broadcast(8, i16, 16)
|
||||
load_and_broadcast(8, i32, 32)
|
||||
load_and_broadcast(8, i64, 64)
|
||||
|
||||
masked_load(8, i8, 8, 1)
|
||||
masked_load(8, i16, 16, 2)
|
||||
masked_load(8, i32, 32, 4)
|
||||
masked_load(8, i64, 64, 8)
|
||||
masked_load(i8, 1)
|
||||
masked_load(i16, 2)
|
||||
masked_load(i32, 4)
|
||||
masked_load(float, 4)
|
||||
masked_load(i64, 8)
|
||||
masked_load(double, 8)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather/scatter
|
||||
|
||||
gen_gather(8, i8)
|
||||
gen_gather(8, i16)
|
||||
gen_gather(8, i32)
|
||||
gen_gather(8, i64)
|
||||
gen_gather_factored(i8)
|
||||
gen_gather_factored(i16)
|
||||
gen_gather_factored(i32)
|
||||
gen_gather_factored(float)
|
||||
gen_gather_factored(i64)
|
||||
gen_gather_factored(double)
|
||||
|
||||
gen_scatter(8, i8)
|
||||
gen_scatter(8, i16)
|
||||
gen_scatter(8, i32)
|
||||
gen_scatter(8, i64)
|
||||
gen_scatter(i8)
|
||||
gen_scatter(i16)
|
||||
gen_scatter(i32)
|
||||
gen_scatter(float)
|
||||
gen_scatter(i64)
|
||||
gen_scatter(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float rounding
|
||||
@@ -549,24 +567,24 @@ define <8 x double> @__ceil_varying_double(<8 x double>) nounwind readonly alway
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store
|
||||
|
||||
gen_masked_store(8, i8, 8)
|
||||
gen_masked_store(8, i16, 16)
|
||||
gen_masked_store(8, i32, 32)
|
||||
gen_masked_store(8, i64, 64)
|
||||
gen_masked_store(i8)
|
||||
gen_masked_store(i16)
|
||||
gen_masked_store(i32)
|
||||
gen_masked_store(i64)
|
||||
|
||||
masked_store_blend_8_16_by_8()
|
||||
|
||||
define void @__masked_store_blend_32(<8 x i32>* nocapture, <8 x i32>,
|
||||
<8 x i32> %mask) nounwind alwaysinline {
|
||||
%val = load <8 x i32> * %0, align 4
|
||||
define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,
|
||||
<8 x i32> %mask) nounwind alwaysinline {
|
||||
%val = load PTR_OP_ARGS(`<8 x i32> ') %0, align 4
|
||||
%newval = call <8 x i32> @__vselect_i32(<8 x i32> %val, <8 x i32> %1, <8 x i32> %mask)
|
||||
store <8 x i32> %newval, <8 x i32> * %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
|
||||
<8 x i32> %mask) nounwind alwaysinline {
|
||||
%oldValue = load <8 x i64>* %ptr, align 8
|
||||
define void @__masked_store_blend_i64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
|
||||
<8 x i32> %mask) nounwind alwaysinline {
|
||||
%oldValue = load PTR_OP_ARGS(`<8 x i64>') %ptr, align 8
|
||||
|
||||
; Do 8x64-bit blends by doing two <8 x i32> blends, where the <8 x i32> values
|
||||
; are actually bitcast <2 x i64> values
|
||||
@@ -608,6 +626,8 @@ define void @__masked_store_blend_64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
|
||||
ret void
|
||||
}
|
||||
|
||||
masked_store_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
|
||||
@@ -633,3 +653,12 @@ define <8 x double> @__max_varying_double(<8 x double>, <8 x double>) nounwind r
|
||||
binary2to8(ret, double, @llvm.x86.sse2.max.pd, %0, %1)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -41,9 +41,18 @@ stdlib_core()
|
||||
packed_load_and_store()
|
||||
scans()
|
||||
int64minmax()
|
||||
saturation_arithmetic()
|
||||
|
||||
include(`target-sse2-common.ll')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
declare float @__half_to_float_uniform(i16 %v) nounwind readnone
|
||||
declare <WIDTH x float> @__half_to_float_varying(<WIDTH x i16> %v) nounwind readnone
|
||||
declare i16 @__float_to_half_uniform(float %v) nounwind readnone
|
||||
declare <WIDTH x i16> @__float_to_half_varying(<WIDTH x float> %v) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rounding
|
||||
;;
|
||||
@@ -231,10 +240,62 @@ define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline {
|
||||
|
||||
declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define i32 @__movmsk(<4 x i32>) nounwind readnone alwaysinline {
|
||||
define i64 @__movmsk(<4 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i32> %0 to <4 x float>
|
||||
%v = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %floatmask) nounwind readnone
|
||||
ret i32 %v
|
||||
%v64 = zext i32 %v to i64
|
||||
ret i64 %v64
|
||||
}
|
||||
|
||||
define i1 @__any(<4 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i32> %0 to <4 x float>
|
||||
%v = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %floatmask) nounwind readnone
|
||||
%cmp = icmp ne i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__all(<4 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i32> %0 to <4 x float>
|
||||
%v = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %floatmask) nounwind readnone
|
||||
%cmp = icmp eq i32 %v, 15
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__none(<4 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i32> %0 to <4 x float>
|
||||
%v = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %floatmask) nounwind readnone
|
||||
%cmp = icmp eq i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<4 x i8>) nounwind readnone alwaysinline {
|
||||
%wide8 = shufflevector <4 x i8> %0, <4 x i8> zeroinitializer,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4,
|
||||
i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
|
||||
%rv = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %wide8,
|
||||
<16 x i8> zeroinitializer)
|
||||
%r0 = extractelement <2 x i64> %rv, i32 0
|
||||
%r1 = extractelement <2 x i64> %rv, i32 1
|
||||
%r = add i64 %r0, %r1
|
||||
%r16 = trunc i64 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
define internal <4 x i16> @__add_varying_i16(<4 x i16>,
|
||||
<4 x i16>) nounwind readnone alwaysinline {
|
||||
%r = add <4 x i16> %0, %1
|
||||
ret <4 x i16> %r
|
||||
}
|
||||
|
||||
define internal i16 @__add_uniform_i16(i16, i16) nounwind readnone alwaysinline {
|
||||
%r = add i16 %0, %1
|
||||
ret i16 %r
|
||||
}
|
||||
|
||||
define i16 @__reduce_add_int16(<4 x i16>) nounwind readnone alwaysinline {
|
||||
reduce4(i16, @__add_varying_i16, @__add_uniform_i16)
|
||||
}
|
||||
|
||||
define float @__reduce_add_float(<4 x float> %v) nounwind readonly alwaysinline {
|
||||
@@ -273,18 +334,13 @@ define i32 @__reduce_max_int32(<4 x i32>) nounwind readnone {
|
||||
reduce4(i32, @__max_varying_int32, @__max_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_add_uint32(<4 x i32> %v) nounwind readnone {
|
||||
%r = call i32 @__reduce_add_int32(<4 x i32> %v)
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<4 x i32>) nounwind readnone {
|
||||
reduce4(i32, @__min_varying_uint32, @__min_uniform_uint32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_uint32(<4 x i32>) nounwind readnone {
|
||||
reduce4(i32, @__max_varying_uint32, @__max_uniform_uint32)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
define double @__reduce_add_double(<4 x double>) nounwind readnone {
|
||||
@@ -341,17 +397,17 @@ reduce_equal(4)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store
|
||||
|
||||
define void @__masked_store_blend_32(<4 x i32>* nocapture, <4 x i32>,
|
||||
<4 x i32> %mask) nounwind alwaysinline {
|
||||
%val = load <4 x i32> * %0, align 4
|
||||
define void @__masked_store_blend_i32(<4 x i32>* nocapture, <4 x i32>,
|
||||
<4 x i32> %mask) nounwind alwaysinline {
|
||||
%val = load PTR_OP_ARGS(`<4 x i32> ') %0, align 4
|
||||
%newval = call <4 x i32> @__vselect_i32(<4 x i32> %val, <4 x i32> %1, <4 x i32> %mask)
|
||||
store <4 x i32> %newval, <4 x i32> * %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_64(<4 x i64>* nocapture %ptr, <4 x i64> %new,
|
||||
<4 x i32> %mask) nounwind alwaysinline {
|
||||
%oldValue = load <4 x i64>* %ptr, align 8
|
||||
define void @__masked_store_blend_i64(<4 x i64>* nocapture %ptr, <4 x i64> %new,
|
||||
<4 x i32> %mask) nounwind alwaysinline {
|
||||
%oldValue = load PTR_OP_ARGS(`<4 x i64>') %ptr, align 8
|
||||
|
||||
; Do 4x64-bit blends by doing two <4 x i32> blends, where the <4 x i32> values
|
||||
; are actually bitcast <2 x i64> values
|
||||
@@ -392,6 +448,8 @@ define void @__masked_store_blend_64(<4 x i64>* nocapture %ptr, <4 x i64> %new,
|
||||
}
|
||||
|
||||
|
||||
masked_store_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp
|
||||
|
||||
@@ -439,62 +497,15 @@ define <4 x float> @__sqrt_varying_float(<4 x float>) nounwind readonly alwaysin
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; svml stuff
|
||||
|
||||
declare <4 x float> @__svml_sinf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_cosf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_sincosf4(<4 x float> *, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_tanf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_atanf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_atan2f4(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_expf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_logf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_powf4(<4 x float>, <4 x float>) nounwind readnone
|
||||
include(`svml.m4')
|
||||
;; single precision
|
||||
svml_declare(float,f4,4)
|
||||
svml_define(float,f4,4,f)
|
||||
|
||||
;; double precision
|
||||
svml_declare(double,2,2)
|
||||
svml_define_x(double,2,2,d,4)
|
||||
|
||||
define <4 x float> @__svml_sin(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_sinf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_cos(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_cosf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define void @__svml_sincos(<4 x float>, <4 x float> *, <4 x float> *) nounwind readnone alwaysinline {
|
||||
%s = call <4 x float> @__svml_sincosf4(<4 x float> * %2, <4 x float> %0)
|
||||
store <4 x float> %s, <4 x float> * %1
|
||||
ret void
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_tan(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_tanf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_atan(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_atanf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_atan2(<4 x float>, <4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_atan2f4(<4 x float> %0, <4 x float> %1)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_exp(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_expf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_log(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_logf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_pow(<4 x float>, <4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_powf4(<4 x float> %0, <4 x float> %1)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float min/max
|
||||
@@ -543,35 +554,46 @@ define <4 x double> @__max_varying_double(<4 x double>, <4 x double>) nounwind r
|
||||
|
||||
masked_store_blend_8_16_by_4()
|
||||
|
||||
gen_masked_store(4, i8, 8)
|
||||
gen_masked_store(4, i16, 16)
|
||||
gen_masked_store(4, i32, 32)
|
||||
gen_masked_store(4, i64, 64)
|
||||
gen_masked_store(i8)
|
||||
gen_masked_store(i16)
|
||||
gen_masked_store(i32)
|
||||
gen_masked_store(i64)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
load_and_broadcast(4, i8, 8)
|
||||
load_and_broadcast(4, i16, 16)
|
||||
load_and_broadcast(4, i32, 32)
|
||||
load_and_broadcast(4, i64, 64)
|
||||
|
||||
masked_load(4, i8, 8, 1)
|
||||
masked_load(4, i16, 16, 2)
|
||||
masked_load(4, i32, 32, 4)
|
||||
masked_load(4, i64, 64, 8)
|
||||
masked_load(i8, 1)
|
||||
masked_load(i16, 2)
|
||||
masked_load(i32, 4)
|
||||
masked_load(float, 4)
|
||||
masked_load(i64, 8)
|
||||
masked_load(double, 8)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather/scatter
|
||||
|
||||
; define these with the macros from stdlib.m4
|
||||
|
||||
gen_gather(4, i8)
|
||||
gen_gather(4, i16)
|
||||
gen_gather(4, i32)
|
||||
gen_gather(4, i64)
|
||||
gen_gather_factored(i8)
|
||||
gen_gather_factored(i16)
|
||||
gen_gather_factored(i32)
|
||||
gen_gather_factored(float)
|
||||
gen_gather_factored(i64)
|
||||
gen_gather_factored(double)
|
||||
|
||||
gen_scatter(4, i8)
|
||||
gen_scatter(4, i16)
|
||||
gen_scatter(4, i32)
|
||||
gen_scatter(4, i64)
|
||||
gen_scatter(i8)
|
||||
gen_scatter(i16)
|
||||
gen_scatter(i32)
|
||||
gen_scatter(float)
|
||||
gen_scatter(i64)
|
||||
gen_scatter(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
|
||||
500
builtins/target-sse4-16.ll
Normal file
500
builtins/target-sse4-16.ll
Normal file
@@ -0,0 +1,500 @@
|
||||
;; Copyright (c) 2013, 2015, Google, Inc.
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Google, Inc. nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
; Define common 4-wide stuff
|
||||
define(`WIDTH',`8')
|
||||
define(`MASK',`i16')
|
||||
include(`util.m4')
|
||||
|
||||
stdlib_core()
|
||||
packed_load_and_store()
|
||||
scans()
|
||||
int64minmax()
|
||||
saturation_arithmetic()
|
||||
|
||||
include(`target-sse4-common.ll')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
declare float @__half_to_float_uniform(i16 %v) nounwind readnone
|
||||
declare <WIDTH x float> @__half_to_float_varying(<WIDTH x i16> %v) nounwind readnone
|
||||
declare i16 @__float_to_half_uniform(float %v) nounwind readnone
|
||||
declare <WIDTH x i16> @__float_to_half_varying(<WIDTH x float> %v) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define <WIDTH x float> @__rcp_varying_float(<WIDTH x float>) nounwind readonly alwaysinline {
|
||||
unary4to8(call, float, @llvm.x86.sse.rcp.ps, %0)
|
||||
; do one N-R iteration to improve precision
|
||||
; float iv = __rcp_v(v);
|
||||
; return iv * (2. - v * iv);
|
||||
%v_iv = fmul <8 x float> %0, %call
|
||||
%two_minus = fsub <8 x float> <float 2., float 2., float 2., float 2.,
|
||||
float 2., float 2., float 2., float 2.>, %v_iv
|
||||
%iv_mul = fmul <8 x float> %call, %two_minus
|
||||
ret <8 x float> %iv_mul
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; rsqrt
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %v) nounwind readonly alwaysinline {
|
||||
; float is = __rsqrt_v(v);
|
||||
unary4to8(is, float, @llvm.x86.sse.rsqrt.ps, %v)
|
||||
; Newton-Raphson iteration to improve precision
|
||||
; return 0.5 * is * (3. - (v * is) * is);
|
||||
%v_is = fmul <8 x float> %v, %is
|
||||
%v_is_is = fmul <8 x float> %v_is, %is
|
||||
%three_sub = fsub <8 x float> <float 3., float 3., float 3., float 3.,
|
||||
float 3., float 3., float 3., float 3.>, %v_is_is
|
||||
%is_mul = fmul <8 x float> %is, %three_sub
|
||||
%half_scale = fmul <8 x float> <float 0.5, float 0.5, float 0.5, float 0.5,
|
||||
float 0.5, float 0.5, float 0.5, float 0.5>, %is_mul
|
||||
ret <8 x float> %half_scale
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; sqrt
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define <8 x float> @__sqrt_varying_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
unary4to8(call, float, @llvm.x86.sse.sqrt.ps, %0)
|
||||
ret <8 x float> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone
|
||||
|
||||
define <8 x double> @__sqrt_varying_double(<8 x double>) nounwind
|
||||
alwaysinline {
|
||||
unary2to8(ret, double, @llvm.x86.sse2.sqrt.pd, %0)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rounding floats
|
||||
|
||||
declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
|
||||
|
||||
define <8 x float> @__round_varying_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
; roundps, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8
|
||||
round4to8(%0, 8)
|
||||
}
|
||||
|
||||
define <8 x float> @__floor_varying_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
; roundps, round down 0b01 | don't signal precision exceptions 0b1001 = 9
|
||||
round4to8(%0, 9)
|
||||
}
|
||||
|
||||
define <8 x float> @__ceil_varying_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
; roundps, round up 0b10 | don't signal precision exceptions 0b1010 = 10
|
||||
round4to8(%0, 10)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rounding doubles
|
||||
|
||||
declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readnone
|
||||
|
||||
define <8 x double> @__round_varying_double(<8 x double>) nounwind readonly alwaysinline {
|
||||
round2to8double(%0, 8)
|
||||
}
|
||||
|
||||
define <8 x double> @__floor_varying_double(<8 x double>) nounwind readonly alwaysinline {
|
||||
round2to8double(%0, 9)
|
||||
}
|
||||
|
||||
define <8 x double> @__ceil_varying_double(<8 x double>) nounwind readonly alwaysinline {
|
||||
round2to8double(%0, 10)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float min/max
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define <8 x float> @__max_varying_float(<8 x float>, <8 x float>) nounwind readonly alwaysinline {
|
||||
binary4to8(call, float, @llvm.x86.sse.max.ps, %0, %1)
|
||||
ret <8 x float> %call
|
||||
}
|
||||
|
||||
define <8 x float> @__min_varying_float(<8 x float>, <8 x float>) nounwind readonly alwaysinline {
|
||||
binary4to8(call, float, @llvm.x86.sse.min.ps, %0, %1)
|
||||
ret <8 x float> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int32 min/max
|
||||
|
||||
define <8 x i32> @__min_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(call, i32, @llvm.x86.sse41.pminsd, %0, %1)
|
||||
ret <8 x i32> %call
|
||||
}
|
||||
|
||||
define <8 x i32> @__max_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(call, i32, @llvm.x86.sse41.pmaxsd, %0, %1)
|
||||
ret <8 x i32> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; unsigned int min/max
|
||||
|
||||
define <8 x i32> @__min_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(call, i32, @llvm.x86.sse41.pminud, %0, %1)
|
||||
ret <8 x i32> %call
|
||||
}
|
||||
|
||||
define <8 x i32> @__max_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(call, i32, @llvm.x86.sse41.pmaxud, %0, %1)
|
||||
ret <8 x i32> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision min/max
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
|
||||
declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
|
||||
|
||||
define <8 x double> @__min_varying_double(<8 x double>, <8 x double>) nounwind readnone {
|
||||
binary2to8(ret, double, @llvm.x86.sse2.min.pd, %0, %1)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
|
||||
define <8 x double> @__max_varying_double(<8 x double>, <8 x double>) nounwind readnone {
|
||||
binary2to8(ret, double, @llvm.x86.sse2.max.pd, %0, %1)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; svml
|
||||
|
||||
; FIXME
|
||||
include(`svml.m4')
|
||||
svml_stubs(float,f,WIDTH)
|
||||
svml_stubs(double,d,WIDTH)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; horizontal ops / reductions
|
||||
|
||||
declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
|
||||
|
||||
define i64 @__movmsk(<8 x MASK>) nounwind readnone alwaysinline {
|
||||
%m8 = trunc <8 x MASK> %0 to <8 x i8>
|
||||
%mask8 = shufflevector <8 x i8> %m8, <8 x i8> zeroinitializer,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
|
||||
i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
|
||||
%m = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %mask8)
|
||||
%m64 = zext i32 %m to i64
|
||||
ret i64 %m64
|
||||
}
|
||||
|
||||
define i1 @__any(<8 x MASK>) nounwind readnone alwaysinline {
|
||||
%m = call i64 @__movmsk(<8 x MASK> %0)
|
||||
%mne = icmp ne i64 %m, 0
|
||||
ret i1 %mne
|
||||
}
|
||||
|
||||
define i1 @__all(<8 x MASK>) nounwind readnone alwaysinline {
|
||||
%m = call i64 @__movmsk(<8 x MASK> %0)
|
||||
%meq = icmp eq i64 %m, ALL_ON_MASK
|
||||
ret i1 %meq
|
||||
}
|
||||
|
||||
define i1 @__none(<8 x MASK>) nounwind readnone alwaysinline {
|
||||
%m = call i64 @__movmsk(<8 x MASK> %0)
|
||||
%meq = icmp eq i64 %m, 0
|
||||
ret i1 %meq
|
||||
}
|
||||
|
||||
declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<8 x i8>) nounwind readnone alwaysinline {
|
||||
%wide8 = shufflevector <8 x i8> %0, <8 x i8> zeroinitializer,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
|
||||
i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
|
||||
%rv = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %wide8,
|
||||
<16 x i8> zeroinitializer)
|
||||
%r0 = extractelement <2 x i64> %rv, i32 0
|
||||
%r1 = extractelement <2 x i64> %rv, i32 1
|
||||
%r = add i64 %r0, %r1
|
||||
%r16 = trunc i64 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
define internal <8 x i16> @__add_varying_i16(<8 x i16>,
|
||||
<8 x i16>) nounwind readnone alwaysinline {
|
||||
%r = add <8 x i16> %0, %1
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
define internal i16 @__add_uniform_i16(i16, i16) nounwind readnone alwaysinline {
|
||||
%r = add i16 %0, %1
|
||||
ret i16 %r
|
||||
}
|
||||
|
||||
define i16 @__reduce_add_int16(<8 x i16>) nounwind readnone alwaysinline {
|
||||
reduce8(i16, @__add_varying_i16, @__add_uniform_i16)
|
||||
}
|
||||
|
||||
define internal <8 x float> @__add_varying_float(<8 x float>, <8 x float>) {
|
||||
%r = fadd <8 x float> %0, %1
|
||||
ret <8 x float> %r
|
||||
}
|
||||
|
||||
define internal float @__add_uniform_float(float, float) {
|
||||
%r = fadd float %0, %1
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define float @__reduce_add_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
reduce8(float, @__add_varying_float, @__add_uniform_float)
|
||||
}
|
||||
|
||||
define float @__reduce_min_float(<8 x float>) nounwind readnone {
|
||||
reduce8(float, @__min_varying_float, @__min_uniform_float)
|
||||
}
|
||||
|
||||
define float @__reduce_max_float(<8 x float>) nounwind readnone {
|
||||
reduce8(float, @__max_varying_float, @__max_uniform_float)
|
||||
}
|
||||
|
||||
define internal <8 x i32> @__add_varying_int32(<8 x i32>, <8 x i32>) {
|
||||
%r = add <8 x i32> %0, %1
|
||||
ret <8 x i32> %r
|
||||
}
|
||||
|
||||
define internal i32 @__add_uniform_int32(i32, i32) {
|
||||
%r = add i32 %0, %1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_add_int32(<8 x i32>) nounwind readnone {
|
||||
reduce8(i32, @__add_varying_int32, @__add_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_int32(<8 x i32>) nounwind readnone {
|
||||
reduce8(i32, @__min_varying_int32, @__min_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_int32(<8 x i32>) nounwind readnone {
|
||||
reduce8(i32, @__max_varying_int32, @__max_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<8 x i32>) nounwind readnone {
|
||||
reduce8(i32, @__min_varying_uint32, @__min_uniform_uint32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_uint32(<8 x i32>) nounwind readnone {
|
||||
reduce8(i32, @__max_varying_uint32, @__max_uniform_uint32)
|
||||
}
|
||||
|
||||
define internal <8 x double> @__add_varying_double(<8 x double>, <8 x double>) {
|
||||
%r = fadd <8 x double> %0, %1
|
||||
ret <8 x double> %r
|
||||
}
|
||||
|
||||
define internal double @__add_uniform_double(double, double) {
|
||||
%r = fadd double %0, %1
|
||||
ret double %r
|
||||
}
|
||||
|
||||
define double @__reduce_add_double(<8 x double>) nounwind readnone {
|
||||
reduce8(double, @__add_varying_double, @__add_uniform_double)
|
||||
}
|
||||
|
||||
define double @__reduce_min_double(<8 x double>) nounwind readnone {
|
||||
reduce8(double, @__min_varying_double, @__min_uniform_double)
|
||||
}
|
||||
|
||||
define double @__reduce_max_double(<8 x double>) nounwind readnone {
|
||||
reduce8(double, @__max_varying_double, @__max_uniform_double)
|
||||
}
|
||||
|
||||
define internal <8 x i64> @__add_varying_int64(<8 x i64>, <8 x i64>) {
|
||||
%r = add <8 x i64> %0, %1
|
||||
ret <8 x i64> %r
|
||||
}
|
||||
|
||||
define internal i64 @__add_uniform_int64(i64, i64) {
|
||||
%r = add i64 %0, %1
|
||||
ret i64 %r
|
||||
}
|
||||
|
||||
define i64 @__reduce_add_int64(<8 x i64>) nounwind readnone {
|
||||
reduce8(i64, @__add_varying_int64, @__add_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_int64(<8 x i64>) nounwind readnone {
|
||||
reduce8(i64, @__min_varying_int64, @__min_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_max_int64(<8 x i64>) nounwind readnone {
|
||||
reduce8(i64, @__max_varying_int64, @__max_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_uint64(<8 x i64>) nounwind readnone {
|
||||
reduce8(i64, @__min_varying_uint64, @__min_uniform_uint64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_max_uint64(<8 x i64>) nounwind readnone {
|
||||
reduce8(i64, @__max_varying_uint64, @__max_uniform_uint64)
|
||||
}
|
||||
|
||||
reduce_equal(8)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store
|
||||
|
||||
define void @__masked_store_blend_i64(<8 x i64>* nocapture, <8 x i64>,
|
||||
<8 x MASK> %mask) nounwind
|
||||
alwaysinline {
|
||||
%mask_as_i1 = trunc <8 x MASK> %mask to <8 x i1>
|
||||
%old = load PTR_OP_ARGS(`<8 x i64>') %0, align 4
|
||||
%blend = select <8 x i1> %mask_as_i1, <8 x i64> %1, <8 x i64> %old
|
||||
store <8 x i64> %blend, <8 x i64>* %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,
|
||||
<8 x MASK> %mask) nounwind alwaysinline {
|
||||
%mask_as_i1 = trunc <8 x MASK> %mask to <8 x i1>
|
||||
%old = load PTR_OP_ARGS(`<8 x i32>') %0, align 4
|
||||
%blend = select <8 x i1> %mask_as_i1, <8 x i32> %1, <8 x i32> %old
|
||||
store <8 x i32> %blend, <8 x i32>* %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_i16(<8 x i16>* nocapture, <8 x i16>,
|
||||
<8 x MASK> %mask) nounwind alwaysinline {
|
||||
%mask_as_i1 = trunc <8 x MASK> %mask to <8 x i1>
|
||||
%old = load PTR_OP_ARGS(`<8 x i16>') %0, align 4
|
||||
%blend = select <8 x i1> %mask_as_i1, <8 x i16> %1, <8 x i16> %old
|
||||
store <8 x i16> %blend, <8 x i16>* %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_i8(<8 x i8>* nocapture, <8 x i8>,
|
||||
<8 x MASK> %mask) nounwind alwaysinline {
|
||||
%mask_as_i1 = trunc <8 x MASK> %mask to <8 x i1>
|
||||
%old = load PTR_OP_ARGS(`<8 x i8>') %0, align 4
|
||||
%blend = select <8 x i1> %mask_as_i1, <8 x i8> %1, <8 x i8> %old
|
||||
store <8 x i8> %blend, <8 x i8>* %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
gen_masked_store(i8)
|
||||
gen_masked_store(i16)
|
||||
gen_masked_store(i32)
|
||||
gen_masked_store(i64)
|
||||
|
||||
masked_store_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
masked_load(i8, 1)
|
||||
masked_load(i16, 2)
|
||||
masked_load(i32, 4)
|
||||
masked_load(float, 4)
|
||||
masked_load(i64, 8)
|
||||
masked_load(double, 8)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather/scatter
|
||||
|
||||
; define these with the macros from stdlib.m4
|
||||
|
||||
gen_gather_factored(i8)
|
||||
gen_gather_factored(i16)
|
||||
gen_gather_factored(i32)
|
||||
gen_gather_factored(float)
|
||||
gen_gather_factored(i64)
|
||||
gen_gather_factored(double)
|
||||
|
||||
gen_scatter(i8)
|
||||
gen_scatter(i16)
|
||||
gen_scatter(i32)
|
||||
gen_scatter(float)
|
||||
gen_scatter(i64)
|
||||
gen_scatter(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int8/int16 builtins
|
||||
|
||||
declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define <8 x i8> @__avg_up_uint8(<8 x i8>, <8 x i8>) {
|
||||
%v0 = shufflevector <8 x i8> %0, <8 x i8> undef,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
%v1 = shufflevector <8 x i8> %1, <8 x i8> undef,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef,
|
||||
i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
%r16 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %v0, <16 x i8> %v1)
|
||||
%r = shufflevector <16 x i8> %r16, <16 x i8> undef,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
ret <8 x i8> %r
|
||||
}
|
||||
|
||||
declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
|
||||
|
||||
define <8 x i16> @__avg_up_uint16(<8 x i16>, <8 x i16>) {
|
||||
%r = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %0, <8 x i16> %1)
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
define_avg_up_int8()
|
||||
define_avg_up_int16()
|
||||
define_down_avgs()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
498
builtins/target-sse4-8.ll
Normal file
498
builtins/target-sse4-8.ll
Normal file
@@ -0,0 +1,498 @@
|
||||
;; Copyright (c) 2013, 2015, Google, Inc.
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
;; modification, are permitted provided that the following conditions are
|
||||
;; met:
|
||||
;;
|
||||
;; * Redistributions of source code must retain the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer.
|
||||
;;
|
||||
;; * Redistributions in binary form must reproduce the above copyright
|
||||
;; notice, this list of conditions and the following disclaimer in the
|
||||
;; documentation and/or other materials provided with the distribution.
|
||||
;;
|
||||
;; * Neither the name of Google, Inc. nor the names of its
|
||||
;; contributors may be used to endorse or promote products derived from
|
||||
;; this software without specific prior written permission.
|
||||
;;
|
||||
;;
|
||||
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
; Define common 4-wide stuff
|
||||
define(`WIDTH',`16')
|
||||
define(`MASK',`i8')
|
||||
include(`util.m4')
|
||||
|
||||
stdlib_core()
|
||||
packed_load_and_store()
|
||||
scans()
|
||||
int64minmax()
|
||||
saturation_arithmetic()
|
||||
|
||||
include(`target-sse4-common.ll')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
declare float @__half_to_float_uniform(i16 %v) nounwind readnone
|
||||
declare <WIDTH x float> @__half_to_float_varying(<WIDTH x i16> %v) nounwind readnone
|
||||
declare i16 @__float_to_half_uniform(float %v) nounwind readnone
|
||||
declare <WIDTH x i16> @__float_to_half_varying(<WIDTH x float> %v) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define <WIDTH x float> @__rcp_varying_float(<WIDTH x float>) nounwind readonly alwaysinline {
|
||||
unary4to16(call, float, @llvm.x86.sse.rcp.ps, %0)
|
||||
; do one N-R iteration to improve precision
|
||||
; float iv = __rcp_v(v);
|
||||
; return iv * (2. - v * iv);
|
||||
%v_iv = fmul <16 x float> %0, %call
|
||||
%two_minus = fsub <16 x float> <float 2., float 2., float 2., float 2.,
|
||||
float 2., float 2., float 2., float 2.,
|
||||
float 2., float 2., float 2., float 2.,
|
||||
float 2., float 2., float 2., float 2.>, %v_iv
|
||||
%iv_mul = fmul <16 x float> %call, %two_minus
|
||||
ret <16 x float> %iv_mul
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; rsqrt
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define <16 x float> @__rsqrt_varying_float(<16 x float> %v) nounwind readonly alwaysinline {
|
||||
; float is = __rsqrt_v(v);
|
||||
unary4to16(is, float, @llvm.x86.sse.rsqrt.ps, %v)
|
||||
; Newton-Raphson iteration to improve precision
|
||||
; return 0.5 * is * (3. - (v * is) * is);
|
||||
%v_is = fmul <16 x float> %v, %is
|
||||
%v_is_is = fmul <16 x float> %v_is, %is
|
||||
%three_sub = fsub <16 x float> <float 3., float 3., float 3., float 3.,
|
||||
float 3., float 3., float 3., float 3.,
|
||||
float 3., float 3., float 3., float 3.,
|
||||
float 3., float 3., float 3., float 3.>, %v_is_is
|
||||
%is_mul = fmul <16 x float> %is, %three_sub
|
||||
%half_scale = fmul <16 x float> <float 0.5, float 0.5, float 0.5, float 0.5,
|
||||
float 0.5, float 0.5, float 0.5, float 0.5,
|
||||
float 0.5, float 0.5, float 0.5, float 0.5,
|
||||
float 0.5, float 0.5, float 0.5, float 0.5>, %is_mul
|
||||
ret <16 x float> %half_scale
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; sqrt
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define <16 x float> @__sqrt_varying_float(<16 x float>) nounwind readonly alwaysinline {
|
||||
unary4to16(call, float, @llvm.x86.sse.sqrt.ps, %0)
|
||||
ret <16 x float> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone
|
||||
|
||||
define <16 x double> @__sqrt_varying_double(<16 x double>) nounwind
|
||||
alwaysinline {
|
||||
unary2to16(ret, double, @llvm.x86.sse2.sqrt.pd, %0)
|
||||
ret <16 x double> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rounding floats
|
||||
|
||||
declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
|
||||
|
||||
define <16 x float> @__round_varying_float(<16 x float>) nounwind readonly alwaysinline {
|
||||
; roundps, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8
|
||||
round4to16(%0, 8)
|
||||
}
|
||||
|
||||
define <16 x float> @__floor_varying_float(<16 x float>) nounwind readonly alwaysinline {
|
||||
; roundps, round down 0b01 | don't signal precision exceptions 0b1001 = 9
|
||||
round4to16(%0, 9)
|
||||
}
|
||||
|
||||
define <16 x float> @__ceil_varying_float(<16 x float>) nounwind readonly alwaysinline {
|
||||
; roundps, round up 0b10 | don't signal precision exceptions 0b1010 = 10
|
||||
round4to16(%0, 10)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rounding doubles
|
||||
|
||||
declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readnone
|
||||
|
||||
define <16 x double> @__round_varying_double(<16 x double>) nounwind readonly alwaysinline {
|
||||
round2to16double(%0, 8)
|
||||
}
|
||||
|
||||
define <16 x double> @__floor_varying_double(<16 x double>) nounwind readonly alwaysinline {
|
||||
; roundpd, round down 0b01 | don't signal precision exceptions 0b1001 = 9
|
||||
round2to16double(%0, 9)
|
||||
}
|
||||
|
||||
define <16 x double> @__ceil_varying_double(<16 x double>) nounwind readonly alwaysinline {
|
||||
; roundpd, round up 0b10 | don't signal precision exceptions 0b1010 = 10
|
||||
round2to16double(%0, 10)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float min/max
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define <16 x float> @__max_varying_float(<16 x float>, <16 x float>) nounwind readonly alwaysinline {
|
||||
binary4to16(call, float, @llvm.x86.sse.max.ps, %0, %1)
|
||||
ret <16 x float> %call
|
||||
}
|
||||
|
||||
define <16 x float> @__min_varying_float(<16 x float>, <16 x float>) nounwind readonly alwaysinline {
|
||||
binary4to16(call, float, @llvm.x86.sse.min.ps, %0, %1)
|
||||
ret <16 x float> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int32 min/max
|
||||
|
||||
define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(call, i32, @llvm.x86.sse41.pminsd, %0, %1)
|
||||
ret <16 x i32> %call
|
||||
}
|
||||
|
||||
define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(call, i32, @llvm.x86.sse41.pmaxsd, %0, %1)
|
||||
ret <16 x i32> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; unsigned int min/max
|
||||
|
||||
define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(call, i32, @llvm.x86.sse41.pminud, %0, %1)
|
||||
ret <16 x i32> %call
|
||||
}
|
||||
|
||||
define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to16(call, i32, @llvm.x86.sse41.pmaxud, %0, %1)
|
||||
ret <16 x i32> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision min/max
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
|
||||
declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
|
||||
|
||||
define <16 x double> @__min_varying_double(<16 x double>, <16 x double>) nounwind readnone {
|
||||
binary2to16(ret, double, @llvm.x86.sse2.min.pd, %0, %1)
|
||||
ret <16 x double> %ret
|
||||
}
|
||||
|
||||
define <16 x double> @__max_varying_double(<16 x double>, <16 x double>) nounwind readnone {
|
||||
binary2to16(ret, double, @llvm.x86.sse2.max.pd, %0, %1)
|
||||
ret <16 x double> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; svml
|
||||
|
||||
; FIXME
|
||||
|
||||
include(`svml.m4')
|
||||
svml_stubs(float,f,WIDTH)
|
||||
svml_stubs(double,d,WIDTH)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; horizontal ops / reductions
|
||||
|
||||
declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
|
||||
|
||||
define i64 @__movmsk(<16 x i8>) nounwind readnone alwaysinline {
|
||||
%m = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %0)
|
||||
%m64 = zext i32 %m to i64
|
||||
ret i64 %m64
|
||||
}
|
||||
|
||||
define i1 @__any(<16 x i8>) nounwind readnone alwaysinline {
|
||||
%m = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %0)
|
||||
%mne = icmp ne i32 %m, 0
|
||||
ret i1 %mne
|
||||
}
|
||||
|
||||
define i1 @__all(<16 x i8>) nounwind readnone alwaysinline {
|
||||
%m = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %0)
|
||||
%meq = icmp eq i32 %m, ALL_ON_MASK
|
||||
ret i1 %meq
|
||||
}
|
||||
|
||||
define i1 @__none(<16 x i8>) nounwind readnone alwaysinline {
|
||||
%m = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %0)
|
||||
%meq = icmp eq i32 %m, 0
|
||||
ret i1 %meq
|
||||
}
|
||||
|
||||
declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<16 x i8>) nounwind readnone alwaysinline {
|
||||
%rv = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %0,
|
||||
<16 x i8> zeroinitializer)
|
||||
%r0 = extractelement <2 x i64> %rv, i32 0
|
||||
%r1 = extractelement <2 x i64> %rv, i32 1
|
||||
%r = add i64 %r0, %r1
|
||||
%r16 = trunc i64 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
define internal <16 x i16> @__add_varying_i16(<16 x i16>,
|
||||
<16 x i16>) nounwind readnone alwaysinline {
|
||||
%r = add <16 x i16> %0, %1
|
||||
ret <16 x i16> %r
|
||||
}
|
||||
|
||||
define internal i16 @__add_uniform_i16(i16, i16) nounwind readnone alwaysinline {
|
||||
%r = add i16 %0, %1
|
||||
ret i16 %r
|
||||
}
|
||||
|
||||
define i16 @__reduce_add_int16(<16 x i16>) nounwind readnone alwaysinline {
|
||||
reduce16(i16, @__add_varying_i16, @__add_uniform_i16)
|
||||
}
|
||||
|
||||
define internal <16 x float> @__add_varying_float(<16 x float>, <16 x float>) {
|
||||
%r = fadd <16 x float> %0, %1
|
||||
ret <16 x float> %r
|
||||
}
|
||||
|
||||
define internal float @__add_uniform_float(float, float) {
|
||||
%r = fadd float %0, %1
|
||||
ret float %r
|
||||
}
|
||||
|
||||
define float @__reduce_add_float(<16 x float>) nounwind readonly alwaysinline {
|
||||
reduce16(float, @__add_varying_float, @__add_uniform_float)
|
||||
}
|
||||
|
||||
define float @__reduce_min_float(<16 x float>) nounwind readnone {
|
||||
reduce16(float, @__min_varying_float, @__min_uniform_float)
|
||||
}
|
||||
|
||||
define float @__reduce_max_float(<16 x float>) nounwind readnone {
|
||||
reduce16(float, @__max_varying_float, @__max_uniform_float)
|
||||
}
|
||||
|
||||
define internal <16 x i32> @__add_varying_int32(<16 x i32>, <16 x i32>) {
|
||||
%r = add <16 x i32> %0, %1
|
||||
ret <16 x i32> %r
|
||||
}
|
||||
|
||||
define internal i32 @__add_uniform_int32(i32, i32) {
|
||||
%r = add i32 %0, %1
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_add_int32(<16 x i32>) nounwind readnone {
|
||||
reduce16(i32, @__add_varying_int32, @__add_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_int32(<16 x i32>) nounwind readnone {
|
||||
reduce16(i32, @__min_varying_int32, @__min_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_int32(<16 x i32>) nounwind readnone {
|
||||
reduce16(i32, @__max_varying_int32, @__max_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<16 x i32>) nounwind readnone {
|
||||
reduce16(i32, @__min_varying_uint32, @__min_uniform_uint32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_uint32(<16 x i32>) nounwind readnone {
|
||||
reduce16(i32, @__max_varying_uint32, @__max_uniform_uint32)
|
||||
}
|
||||
|
||||
define internal <16 x double> @__add_varying_double(<16 x double>, <16 x double>) {
|
||||
%r = fadd <16 x double> %0, %1
|
||||
ret <16 x double> %r
|
||||
}
|
||||
|
||||
define internal double @__add_uniform_double(double, double) {
|
||||
%r = fadd double %0, %1
|
||||
ret double %r
|
||||
}
|
||||
|
||||
define double @__reduce_add_double(<16 x double>) nounwind readnone {
|
||||
reduce16(double, @__add_varying_double, @__add_uniform_double)
|
||||
}
|
||||
|
||||
define double @__reduce_min_double(<16 x double>) nounwind readnone {
|
||||
reduce16(double, @__min_varying_double, @__min_uniform_double)
|
||||
}
|
||||
|
||||
define double @__reduce_max_double(<16 x double>) nounwind readnone {
|
||||
reduce16(double, @__max_varying_double, @__max_uniform_double)
|
||||
}
|
||||
|
||||
define internal <16 x i64> @__add_varying_int64(<16 x i64>, <16 x i64>) {
|
||||
%r = add <16 x i64> %0, %1
|
||||
ret <16 x i64> %r
|
||||
}
|
||||
|
||||
define internal i64 @__add_uniform_int64(i64, i64) {
|
||||
%r = add i64 %0, %1
|
||||
ret i64 %r
|
||||
}
|
||||
|
||||
define i64 @__reduce_add_int64(<16 x i64>) nounwind readnone {
|
||||
reduce16(i64, @__add_varying_int64, @__add_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_int64(<16 x i64>) nounwind readnone {
|
||||
reduce16(i64, @__min_varying_int64, @__min_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_max_int64(<16 x i64>) nounwind readnone {
|
||||
reduce16(i64, @__max_varying_int64, @__max_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_uint64(<16 x i64>) nounwind readnone {
|
||||
reduce16(i64, @__min_varying_uint64, @__min_uniform_uint64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_max_uint64(<16 x i64>) nounwind readnone {
|
||||
reduce16(i64, @__max_varying_uint64, @__max_uniform_uint64)
|
||||
}
|
||||
|
||||
reduce_equal(16)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store
|
||||
|
||||
define void @__masked_store_blend_i64(<16 x i64>* nocapture, <16 x i64>,
|
||||
<16 x i8> %mask) nounwind
|
||||
alwaysinline {
|
||||
%mask_as_i1 = trunc <16 x MASK> %mask to <16 x i1>
|
||||
%old = load PTR_OP_ARGS(`<16 x i64>') %0, align 4
|
||||
%blend = select <16 x i1> %mask_as_i1, <16 x i64> %1, <16 x i64> %old
|
||||
store <16 x i64> %blend, <16 x i64>* %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_i32(<16 x i32>* nocapture, <16 x i32>,
|
||||
<16 x MASK> %mask) nounwind alwaysinline {
|
||||
%mask_as_i1 = trunc <16 x MASK> %mask to <16 x i1>
|
||||
%old = load PTR_OP_ARGS(`<16 x i32>') %0, align 4
|
||||
%blend = select <16 x i1> %mask_as_i1, <16 x i32> %1, <16 x i32> %old
|
||||
store <16 x i32> %blend, <16 x i32>* %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_i16(<16 x i16>* nocapture, <16 x i16>,
|
||||
<16 x MASK> %mask) nounwind alwaysinline {
|
||||
%mask_as_i1 = trunc <16 x MASK> %mask to <16 x i1>
|
||||
%old = load PTR_OP_ARGS(`<16 x i16>') %0, align 4
|
||||
%blend = select <16 x i1> %mask_as_i1, <16 x i16> %1, <16 x i16> %old
|
||||
store <16 x i16> %blend, <16 x i16>* %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define void @__masked_store_blend_i8(<16 x i8>* nocapture, <16 x i8>,
|
||||
<16 x MASK> %mask) nounwind alwaysinline {
|
||||
%old = load PTR_OP_ARGS(`<16 x i8>') %0, align 4
|
||||
%blend = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %old, <16 x i8> %1,
|
||||
<16 x i8> %mask)
|
||||
store <16 x i8> %blend, <16 x i8>* %0, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
gen_masked_store(i8)
|
||||
gen_masked_store(i16)
|
||||
gen_masked_store(i32)
|
||||
gen_masked_store(i64)
|
||||
|
||||
masked_store_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
masked_load(i8, 1)
|
||||
masked_load(i16, 2)
|
||||
masked_load(i32, 4)
|
||||
masked_load(float, 4)
|
||||
masked_load(i64, 8)
|
||||
masked_load(double, 8)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather/scatter
|
||||
|
||||
; define these with the macros from stdlib.m4
|
||||
|
||||
gen_gather_factored(i8)
|
||||
gen_gather_factored(i16)
|
||||
gen_gather_factored(i32)
|
||||
gen_gather_factored(float)
|
||||
gen_gather_factored(i64)
|
||||
gen_gather_factored(double)
|
||||
|
||||
gen_scatter(i8)
|
||||
gen_scatter(i16)
|
||||
gen_scatter(i32)
|
||||
gen_scatter(float)
|
||||
gen_scatter(i64)
|
||||
gen_scatter(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int8/int16 builtins
|
||||
|
||||
declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define <16 x i8> @__avg_up_uint8(<16 x i8>, <16 x i8>) nounwind readnone {
|
||||
%r = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %0, <16 x i8> %1)
|
||||
ret <16 x i8> %r
|
||||
}
|
||||
|
||||
declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
|
||||
|
||||
define <16 x i16> @__avg_up_uint16(<16 x i16>, <16 x i16>) nounwind readnone {
|
||||
v16tov8(i16, %0, %a0, %b0)
|
||||
v16tov8(i16, %1, %a1, %b1)
|
||||
%r0 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1)
|
||||
%r1 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %b0, <8 x i16> %b1)
|
||||
v8tov16(i16, %r0, %r1, %r)
|
||||
ret <16 x i16> %r
|
||||
}
|
||||
|
||||
define_avg_up_int8()
|
||||
define_avg_up_int16()
|
||||
define_down_avgs()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -29,10 +29,14 @@
|
||||
;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; SSE4 target implementation.
|
||||
|
||||
ctlztz()
|
||||
define_prefetches()
|
||||
define_shuffles()
|
||||
aossoa()
|
||||
rdrand_decls()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rounding floats
|
||||
@@ -66,7 +70,7 @@ define float @__round_uniform_float(float) nounwind readonly alwaysinline {
|
||||
define float @__floor_uniform_float(float) nounwind readonly alwaysinline {
|
||||
; see above for round_ss instrinsic discussion...
|
||||
%xi = insertelement <4 x float> undef, float %0, i32 0
|
||||
; roundps, round down 0b01 | don't signal precision exceptions 0b1010 = 9
|
||||
; roundps, round down 0b01 | don't signal precision exceptions 0b1001 = 9
|
||||
%xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 9)
|
||||
%rs = extractelement <4 x float> %xr, i32 0
|
||||
ret float %rs
|
||||
@@ -96,7 +100,7 @@ define double @__round_uniform_double(double) nounwind readonly alwaysinline {
|
||||
define double @__floor_uniform_double(double) nounwind readonly alwaysinline {
|
||||
; see above for round_ss instrinsic discussion...
|
||||
%xi = insertelement <2 x double> undef, double %0, i32 0
|
||||
; roundpd, round down 0b01 | don't signal precision exceptions 0b1001 = 9
|
||||
; roundsd, round down 0b01 | don't signal precision exceptions 0b1001 = 9
|
||||
%xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 9)
|
||||
%rs = extractelement <2 x double> %xr, i32 0
|
||||
ret double %rs
|
||||
@@ -105,7 +109,7 @@ define double @__floor_uniform_double(double) nounwind readonly alwaysinline {
|
||||
define double @__ceil_uniform_double(double) nounwind readonly alwaysinline {
|
||||
; see above for round_ss instrinsic discussion...
|
||||
%xi = insertelement <2 x double> undef, double %0, i32 0
|
||||
; roundps, round up 0b10 | don't signal precision exceptions 0b1010 = 10
|
||||
; roundsd, round up 0b10 | don't signal precision exceptions 0b1010 = 10
|
||||
%xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 10)
|
||||
%rs = extractelement <2 x double> %xr, i32 0
|
||||
ret double %rs
|
||||
@@ -118,6 +122,8 @@ declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
|
||||
|
||||
define float @__rcp_uniform_float(float) nounwind readonly alwaysinline {
|
||||
; do the rcpss call
|
||||
; uniform float iv = extract(__rcp_u(v), 0);
|
||||
; return iv * (2. - v * iv);
|
||||
%vecval = insertelement <4 x float> undef, float %0, i32 0
|
||||
%call = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %vecval)
|
||||
%scall = extractelement <4 x float> %call, i32 0
|
||||
@@ -129,9 +135,8 @@ define float @__rcp_uniform_float(float) nounwind readonly alwaysinline {
|
||||
ret float %iv_mul
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; rsqrt
|
||||
;; rsqrt
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
|
||||
|
||||
@@ -153,7 +158,7 @@ define float @__rsqrt_uniform_float(float) nounwind readonly alwaysinline {
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; sqrt
|
||||
;; sqrt
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
|
||||
|
||||
@@ -162,6 +167,16 @@ define float @__sqrt_uniform_float(float) nounwind readonly alwaysinline {
|
||||
ret float %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
|
||||
|
||||
define double @__sqrt_uniform_double(double) nounwind alwaysinline {
|
||||
sse_unary_scalar(ret, 2, double, @llvm.x86.sse2.sqrt.sd, %0)
|
||||
ret double %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; fast math mode
|
||||
|
||||
@@ -172,7 +187,7 @@ define void @__fastmath() nounwind alwaysinline {
|
||||
%ptr = alloca i32
|
||||
%ptr8 = bitcast i32 * %ptr to i8 *
|
||||
call void @llvm.x86.sse.stmxcsr(i8 * %ptr8)
|
||||
%oldval = load i32 *%ptr
|
||||
%oldval = load PTR_OP_ARGS(`i32 ') %ptr
|
||||
|
||||
; turn on DAZ (64)/FTZ (32768) -> 32832
|
||||
%update = or i32 %oldval, 32832
|
||||
@@ -197,36 +212,25 @@ define float @__min_uniform_float(float, float) nounwind readonly alwaysinline {
|
||||
ret float %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
|
||||
|
||||
define double @__sqrt_uniform_double(double) nounwind alwaysinline {
|
||||
sse_unary_scalar(ret, 2, double, @llvm.x86.sse2.sqrt.sd, %0)
|
||||
ret double %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision min/max
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
|
||||
declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
|
||||
|
||||
define double @__min_uniform_double(double, double) nounwind readnone {
|
||||
define double @__min_uniform_double(double, double) nounwind readnone alwaysinline {
|
||||
sse_binary_scalar(ret, 2, double, @llvm.x86.sse2.min.sd, %0, %1)
|
||||
ret double %ret
|
||||
}
|
||||
|
||||
|
||||
define double @__max_uniform_double(double, double) nounwind readnone {
|
||||
define double @__max_uniform_double(double, double) nounwind readnone alwaysinline {
|
||||
sse_binary_scalar(ret, 2, double, @llvm.x86.sse2.max.sd, %0, %1)
|
||||
ret double %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int32 min/max
|
||||
;; int min/max
|
||||
|
||||
declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
@@ -241,8 +245,9 @@ define i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline {
|
||||
ret i32 %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; unsigned int min/max
|
||||
;; unsigned int min/max
|
||||
|
||||
declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
|
||||
@@ -257,9 +262,8 @@ define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline {
|
||||
ret i32 %ret
|
||||
}
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; horizontal ops / reductions
|
||||
;; horizontal ops / reductions
|
||||
|
||||
declare i32 @llvm.ctpop.i32(i32) nounwind readnone
|
||||
|
||||
@@ -274,3 +278,5 @@ define i64 @__popcnt_int64(i64) nounwind readonly alwaysinline {
|
||||
%call = call i64 @llvm.ctpop.i64(i64 %0)
|
||||
ret i64 %call
|
||||
}
|
||||
|
||||
declare_nvptx()
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -44,9 +44,18 @@ stdlib_core()
|
||||
packed_load_and_store()
|
||||
scans()
|
||||
int64minmax()
|
||||
saturation_arithmetic()
|
||||
|
||||
include(`target-sse4-common.ll')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
declare float @__half_to_float_uniform(i16 %v) nounwind readnone
|
||||
declare <WIDTH x float> @__half_to_float_varying(<WIDTH x i16> %v) nounwind readnone
|
||||
declare i16 @__float_to_half_uniform(float %v) nounwind readnone
|
||||
declare <WIDTH x i16> @__float_to_half_varying(<WIDTH x float> %v) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp
|
||||
|
||||
@@ -97,87 +106,14 @@ define <8 x float> @__sqrt_varying_float(<8 x float>) nounwind readonly alwaysin
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; svml stuff
|
||||
|
||||
declare <4 x float> @__svml_sinf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_cosf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_sincosf4(<4 x float> *, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_tanf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_atanf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_atan2f4(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_expf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_logf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_powf4(<4 x float>, <4 x float>) nounwind readnone
|
||||
include(`svml.m4')
|
||||
;; single precision
|
||||
svml_declare(float,f4,4)
|
||||
svml_define_x(float,f4,4,f,8)
|
||||
|
||||
|
||||
define <8 x float> @__svml_sin(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_sinf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_cos(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_cosf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define void @__svml_sincos(<8 x float>, <8 x float> *,
|
||||
<8 x float> *) nounwind readnone alwaysinline {
|
||||
; call svml_sincosf4 two times with the two 4-wide sub-vectors
|
||||
%a = shufflevector <8 x float> %0, <8 x float> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%b = shufflevector <8 x float> %0, <8 x float> undef,
|
||||
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
|
||||
%cospa = alloca <4 x float>
|
||||
%sa = call <4 x float> @__svml_sincosf4(<4 x float> * %cospa, <4 x float> %a)
|
||||
|
||||
%cospb = alloca <4 x float>
|
||||
%sb = call <4 x float> @__svml_sincosf4(<4 x float> * %cospb, <4 x float> %b)
|
||||
|
||||
%sin = shufflevector <4 x float> %sa, <4 x float> %sb,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3,
|
||||
i32 4, i32 5, i32 6, i32 7>
|
||||
store <8 x float> %sin, <8 x float> * %1
|
||||
|
||||
%cosa = load <4 x float> * %cospa
|
||||
%cosb = load <4 x float> * %cospb
|
||||
%cos = shufflevector <4 x float> %cosa, <4 x float> %cosb,
|
||||
<8 x i32> <i32 0, i32 1, i32 2, i32 3,
|
||||
i32 4, i32 5, i32 6, i32 7>
|
||||
store <8 x float> %cos, <8 x float> * %2
|
||||
|
||||
ret void
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_tan(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_tanf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_atan(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_atanf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_atan2(<8 x float>,
|
||||
<8 x float>) nounwind readnone alwaysinline {
|
||||
binary4to8(ret, float, @__svml_atan2f4, %0, %1)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_exp(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_expf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_log(<8 x float>) nounwind readnone alwaysinline {
|
||||
unary4to8(ret, float, @__svml_logf4, %0)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
|
||||
define <8 x float> @__svml_pow(<8 x float>,
|
||||
<8 x float>) nounwind readnone alwaysinline {
|
||||
binary4to8(ret, float, @__svml_powf4, %0, %1)
|
||||
ret <8 x float> %ret
|
||||
}
|
||||
;; double precision
|
||||
svml_declare(double,2,2)
|
||||
svml_define_x(double,2,2,d,8)
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
@@ -213,13 +149,13 @@ define <8 x i32> @__max_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly al
|
||||
; unsigned int min/max
|
||||
|
||||
define <8 x i32> @__min_varying_uint32(<8 x i32>,
|
||||
<8 x i32>) nounwind readonly alwaysinline {
|
||||
<8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(call, i32, @llvm.x86.sse41.pminud, %0, %1)
|
||||
ret <8 x i32> %call
|
||||
}
|
||||
|
||||
define <8 x i32> @__max_varying_uint32(<8 x i32>,
|
||||
<8 x i32>) nounwind readonly alwaysinline {
|
||||
<8 x i32>) nounwind readonly alwaysinline {
|
||||
binary4to8(call, i32, @llvm.x86.sse41.pmaxud, %0, %1)
|
||||
ret <8 x i32> %call
|
||||
}
|
||||
@@ -229,7 +165,7 @@ define <8 x i32> @__max_varying_uint32(<8 x i32>,
|
||||
|
||||
declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define i32 @__movmsk(<8 x i32>) nounwind readnone alwaysinline {
|
||||
define i64 @__movmsk(<8 x i32>) nounwind readnone alwaysinline {
|
||||
; first do two 4-wide movmsk calls
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%m0 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
@@ -243,7 +179,92 @@ define i32 @__movmsk(<8 x i32>) nounwind readnone alwaysinline {
|
||||
; of the second one
|
||||
%v1s = shl i32 %v1, 4
|
||||
%v = or i32 %v0, %v1s
|
||||
ret i32 %v
|
||||
%v64 = zext i32 %v to i64
|
||||
ret i64 %v64
|
||||
}
|
||||
|
||||
define i1 @__any(<8 x i32>) nounwind readnone alwaysinline {
|
||||
; first do two 4-wide movmsk calls
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%m0 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%v0 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m0) nounwind readnone
|
||||
%m1 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
%v1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m1) nounwind readnone
|
||||
|
||||
; and shift the first one over by 4 before ORing it with the value
|
||||
; of the second one
|
||||
%v1s = shl i32 %v1, 4
|
||||
%v = or i32 %v0, %v1s
|
||||
%cmp = icmp ne i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__all(<8 x i32>) nounwind readnone alwaysinline {
|
||||
; first do two 4-wide movmsk calls
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%m0 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%v0 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m0) nounwind readnone
|
||||
%m1 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
%v1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m1) nounwind readnone
|
||||
|
||||
; and shift the first one over by 4 before ORing it with the value
|
||||
; of the second one
|
||||
%v1s = shl i32 %v1, 4
|
||||
%v = or i32 %v0, %v1s
|
||||
%cmp = icmp eq i32 %v, 255
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__none(<8 x i32>) nounwind readnone alwaysinline {
|
||||
; first do two 4-wide movmsk calls
|
||||
%floatmask = bitcast <8 x i32> %0 to <8 x float>
|
||||
%m0 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%v0 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m0) nounwind readnone
|
||||
%m1 = shufflevector <8 x float> %floatmask, <8 x float> undef,
|
||||
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
%v1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %m1) nounwind readnone
|
||||
|
||||
; and shift the first one over by 4 before ORing it with the value
|
||||
; of the second one
|
||||
%v1s = shl i32 %v1, 4
|
||||
%v = or i32 %v0, %v1s
|
||||
%cmp = icmp eq i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<8 x i8>) nounwind readnone alwaysinline {
|
||||
%wide8 = shufflevector <8 x i8> %0, <8 x i8> zeroinitializer,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
|
||||
i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
|
||||
%rv = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %wide8,
|
||||
<16 x i8> zeroinitializer)
|
||||
%r0 = extractelement <2 x i64> %rv, i32 0
|
||||
%r1 = extractelement <2 x i64> %rv, i32 1
|
||||
%r = add i64 %r0, %r1
|
||||
%r16 = trunc i64 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
define internal <8 x i16> @__add_varying_i16(<8 x i16>,
|
||||
<8 x i16>) nounwind readnone alwaysinline {
|
||||
%r = add <8 x i16> %0, %1
|
||||
ret <8 x i16> %r
|
||||
}
|
||||
|
||||
define internal i16 @__add_uniform_i16(i16, i16) nounwind readnone alwaysinline {
|
||||
%r = add i16 %0, %1
|
||||
ret i16 %r
|
||||
}
|
||||
|
||||
define i16 @__reduce_add_int16(<8 x i16>) nounwind readnone alwaysinline {
|
||||
reduce8(i16, @__add_varying_i16, @__add_uniform_i16)
|
||||
}
|
||||
|
||||
define float @__reduce_min_float(<8 x float>) nounwind readnone alwaysinline {
|
||||
@@ -279,11 +300,6 @@ define i32 @__reduce_max_int32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8by4(i32, @llvm.x86.sse41.pmaxsd, @__max_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_add_uint32(<8 x i32> %v) nounwind readnone alwaysinline {
|
||||
%r = call i32 @__reduce_add_int32(<8 x i32> %v)
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<8 x i32>) nounwind readnone alwaysinline {
|
||||
reduce8by4(i32, @llvm.x86.sse41.pminud, @__min_uniform_uint32)
|
||||
}
|
||||
@@ -316,7 +332,7 @@ define double @__reduce_max_double(<8 x double>) nounwind readnone {
|
||||
}
|
||||
|
||||
define <4 x i64> @__add_varying_int64(<4 x i64>,
|
||||
<4 x i64>) nounwind readnone alwaysinline {
|
||||
<4 x i64>) nounwind readnone alwaysinline {
|
||||
%r = add <4 x i64> %0, %1
|
||||
ret <4 x i64> %r
|
||||
}
|
||||
@@ -351,28 +367,30 @@ reduce_equal(8)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
load_and_broadcast(8, i8, 8)
|
||||
load_and_broadcast(8, i16, 16)
|
||||
load_and_broadcast(8, i32, 32)
|
||||
load_and_broadcast(8, i64, 64)
|
||||
|
||||
masked_load(8, i8, 8, 1)
|
||||
masked_load(8, i16, 16, 2)
|
||||
masked_load(8, i32, 32, 4)
|
||||
masked_load(8, i64, 64, 8)
|
||||
masked_load(i8, 1)
|
||||
masked_load(i16, 2)
|
||||
masked_load(i32, 4)
|
||||
masked_load(float, 4)
|
||||
masked_load(i64, 8)
|
||||
masked_load(double, 8)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather/scatter
|
||||
|
||||
gen_gather(8, i8)
|
||||
gen_gather(8, i16)
|
||||
gen_gather(8, i32)
|
||||
gen_gather(8, i64)
|
||||
gen_gather_factored(i8)
|
||||
gen_gather_factored(i16)
|
||||
gen_gather_factored(i32)
|
||||
gen_gather_factored(float)
|
||||
gen_gather_factored(i64)
|
||||
gen_gather_factored(double)
|
||||
|
||||
gen_scatter(8, i8)
|
||||
gen_scatter(8, i16)
|
||||
gen_scatter(8, i32)
|
||||
gen_scatter(8, i64)
|
||||
gen_scatter(i8)
|
||||
gen_scatter(i16)
|
||||
gen_scatter(i32)
|
||||
gen_scatter(float)
|
||||
gen_scatter(i64)
|
||||
gen_scatter(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; float rounding
|
||||
@@ -435,25 +453,25 @@ define float @__reduce_add_float(<8 x float>) nounwind readonly alwaysinline {
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store
|
||||
|
||||
gen_masked_store(8, i8, 8)
|
||||
gen_masked_store(8, i16, 16)
|
||||
gen_masked_store(8, i32, 32)
|
||||
gen_masked_store(8, i64, 64)
|
||||
gen_masked_store(i8)
|
||||
gen_masked_store(i16)
|
||||
gen_masked_store(i32)
|
||||
gen_masked_store(i64)
|
||||
|
||||
masked_store_blend_8_16_by_8()
|
||||
|
||||
declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>,
|
||||
<4 x float>) nounwind readnone
|
||||
|
||||
define void @__masked_store_blend_32(<8 x i32>* nocapture, <8 x i32>,
|
||||
<8 x i32> %mask) nounwind alwaysinline {
|
||||
define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,
|
||||
<8 x i32> %mask) nounwind alwaysinline {
|
||||
; do two 4-wide blends with blendvps
|
||||
%mask_as_float = bitcast <8 x i32> %mask to <8 x float>
|
||||
%mask_a = shufflevector <8 x float> %mask_as_float, <8 x float> undef,
|
||||
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
%mask_b = shufflevector <8 x float> %mask_as_float, <8 x float> undef,
|
||||
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
%oldValue = load <8 x i32>* %0, align 4
|
||||
%oldValue = load PTR_OP_ARGS(`<8 x i32>') %0, align 4
|
||||
%oldAsFloat = bitcast <8 x i32> %oldValue to <8 x float>
|
||||
%newAsFloat = bitcast <8 x i32> %1 to <8 x float>
|
||||
%old_a = shufflevector <8 x float> %oldAsFloat, <8 x float> undef,
|
||||
@@ -475,14 +493,14 @@ define void @__masked_store_blend_32(<8 x i32>* nocapture, <8 x i32>,
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @__masked_store_blend_64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
|
||||
<8 x i32> %mask) nounwind alwaysinline {
|
||||
define void @__masked_store_blend_i64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
|
||||
<8 x i32> %mask) nounwind alwaysinline {
|
||||
; implement this as 4 blends of <4 x i32>s, which are actually bitcast
|
||||
; <2 x i64>s...
|
||||
|
||||
%mask_as_float = bitcast <8 x i32> %mask to <8 x float>
|
||||
|
||||
%old = load <8 x i64>* %ptr, align 8
|
||||
%old = load PTR_OP_ARGS(`<8 x i64>') %ptr, align 8
|
||||
|
||||
; set up the first two 64-bit values
|
||||
%old01 = shufflevector <8 x i64> %old, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
|
||||
@@ -542,6 +560,7 @@ define void @__masked_store_blend_64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
|
||||
ret void
|
||||
}
|
||||
|
||||
masked_store_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision sqrt
|
||||
@@ -568,3 +587,17 @@ define <8 x double> @__max_varying_double(<8 x double>, <8 x double>) nounwind r
|
||||
binary2to8(ret, double, @llvm.x86.sse2.max.pd, %0, %1)
|
||||
ret <8 x double> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int8/int16 builtins
|
||||
|
||||
define_avgs()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
;; Copyright (c) 2010-2011, Intel Corporation
|
||||
;; Copyright (c) 2010-2015, Intel Corporation
|
||||
;; All rights reserved.
|
||||
;;
|
||||
;; Redistribution and use in source and binary forms, with or without
|
||||
@@ -41,19 +41,28 @@ stdlib_core()
|
||||
packed_load_and_store()
|
||||
scans()
|
||||
int64minmax()
|
||||
saturation_arithmetic()
|
||||
|
||||
include(`target-sse4-common.ll')
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; half conversion routines
|
||||
|
||||
declare float @__half_to_float_uniform(i16 %v) nounwind readnone
|
||||
declare <WIDTH x float> @__half_to_float_varying(<WIDTH x i16> %v) nounwind readnone
|
||||
declare i16 @__float_to_half_uniform(float %v) nounwind readnone
|
||||
declare <WIDTH x i16> @__float_to_half_varying(<WIDTH x float> %v) nounwind readnone
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; rcp
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define <4 x float> @__rcp_varying_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %0)
|
||||
; do one N-R iteration to improve precision
|
||||
; float iv = __rcp_v(v);
|
||||
; return iv * (2. - v * iv);
|
||||
%call = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %0)
|
||||
%v_iv = fmul <4 x float> %0, %call
|
||||
%two_minus = fsub <4 x float> <float 2., float 2., float 2., float 2.>, %v_iv
|
||||
%iv_mul = fmul <4 x float> %call, %two_minus
|
||||
@@ -79,7 +88,7 @@ define <4 x float> @__rsqrt_varying_float(<4 x float> %v) nounwind readonly alwa
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; sqrt
|
||||
;; sqrt
|
||||
|
||||
declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
|
||||
|
||||
@@ -146,16 +155,34 @@ define <4 x double> @__ceil_varying_double(<4 x double>) nounwind readonly alway
|
||||
declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define <4 x float> @__max_varying_float(<4 x float>, <4 x float>) nounwind readonly alwaysinline {
|
||||
define <4 x float> @__max_varying_float(<4 x float>,
|
||||
<4 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %0, <4 x float> %1)
|
||||
ret <4 x float> %call
|
||||
}
|
||||
|
||||
define <4 x float> @__min_varying_float(<4 x float>, <4 x float>) nounwind readonly alwaysinline {
|
||||
define <4 x float> @__min_varying_float(<4 x float>,
|
||||
<4 x float>) nounwind readonly alwaysinline {
|
||||
%call = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %0, <4 x float> %1)
|
||||
ret <4 x float> %call
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision min/max
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
|
||||
declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
|
||||
|
||||
define <4 x double> @__min_varying_double(<4 x double>, <4 x double>) nounwind readnone {
|
||||
binary2to4(ret, double, @llvm.x86.sse2.min.pd, %0, %1)
|
||||
ret <4 x double> %ret
|
||||
}
|
||||
|
||||
define <4 x double> @__max_varying_double(<4 x double>, <4 x double>) nounwind readnone {
|
||||
binary2to4(ret, double, @llvm.x86.sse2.max.pd, %0, %1)
|
||||
ret <4 x double> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int32 min/max
|
||||
|
||||
@@ -183,92 +210,56 @@ define <4 x i32> @__max_varying_uint32(<4 x i32>, <4 x i32>) nounwind readonly a
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; double precision min/max
|
||||
;; svml stuff
|
||||
|
||||
declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
|
||||
declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
|
||||
include(`svml.m4')
|
||||
;; single precision
|
||||
svml_declare(float,f4,4)
|
||||
svml_define(float,f4,4,f)
|
||||
|
||||
define <4 x double> @__min_varying_double(<4 x double>, <4 x double>) nounwind readnone {
|
||||
binary2to4(ret, double, @llvm.x86.sse2.min.pd, %0, %1)
|
||||
ret <4 x double> %ret
|
||||
}
|
||||
|
||||
define <4 x double> @__max_varying_double(<4 x double>, <4 x double>) nounwind readnone {
|
||||
binary2to4(ret, double, @llvm.x86.sse2.max.pd, %0, %1)
|
||||
ret <4 x double> %ret
|
||||
}
|
||||
;; double precision
|
||||
svml_declare(double,2,2)
|
||||
svml_define_x(double,2,2,d,4)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; svml stuff
|
||||
|
||||
declare <4 x float> @__svml_sinf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_cosf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_sincosf4(<4 x float> *, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_tanf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_atanf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_atan2f4(<4 x float>, <4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_expf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_logf4(<4 x float>) nounwind readnone
|
||||
declare <4 x float> @__svml_powf4(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
|
||||
define <4 x float> @__svml_sin(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_sinf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_cos(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_cosf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define void @__svml_sincos(<4 x float>, <4 x float> *, <4 x float> *) nounwind readnone alwaysinline {
|
||||
%s = call <4 x float> @__svml_sincosf4(<4 x float> * %2, <4 x float> %0)
|
||||
store <4 x float> %s, <4 x float> * %1
|
||||
ret void
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_tan(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_tanf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_atan(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_atanf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_atan2(<4 x float>, <4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_atan2f4(<4 x float> %0, <4 x float> %1)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_exp(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_expf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_log(<4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_logf4(<4 x float> %0)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
define <4 x float> @__svml_pow(<4 x float>, <4 x float>) nounwind readnone alwaysinline {
|
||||
%ret = call <4 x float> @__svml_powf4(<4 x float> %0, <4 x float> %1)
|
||||
ret <4 x float> %ret
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; horizontal ops / reductions
|
||||
;; mask handling
|
||||
|
||||
declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
|
||||
|
||||
define i32 @__movmsk(<4 x i32>) nounwind readnone alwaysinline {
|
||||
define i64 @__movmsk(<4 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i32> %0 to <4 x float>
|
||||
%v = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %floatmask) nounwind readnone
|
||||
ret i32 %v
|
||||
%v64 = zext i32 %v to i64
|
||||
ret i64 %v64
|
||||
}
|
||||
|
||||
define i1 @__any(<4 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i32> %0 to <4 x float>
|
||||
%v = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %floatmask) nounwind readnone
|
||||
%cmp = icmp ne i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__all(<4 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i32> %0 to <4 x float>
|
||||
%v = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %floatmask) nounwind readnone
|
||||
%cmp = icmp eq i32 %v, 15
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
define i1 @__none(<4 x i32>) nounwind readnone alwaysinline {
|
||||
%floatmask = bitcast <4 x i32> %0 to <4 x float>
|
||||
%v = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %floatmask) nounwind readnone
|
||||
%cmp = icmp eq i32 %v, 0
|
||||
ret i1 %cmp
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal ops / reductions
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal float ops
|
||||
|
||||
declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind readnone
|
||||
|
||||
define float @__reduce_add_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
@@ -278,47 +269,18 @@ define float @__reduce_add_float(<4 x float>) nounwind readonly alwaysinline {
|
||||
ret float %scalar
|
||||
}
|
||||
|
||||
define float @__reduce_min_float(<4 x float>) nounwind readnone {
|
||||
define float @__reduce_min_float(<4 x float>) nounwind readnone alwaysinline {
|
||||
reduce4(float, @__min_varying_float, @__min_uniform_float)
|
||||
}
|
||||
|
||||
define float @__reduce_max_float(<4 x float>) nounwind readnone {
|
||||
define float @__reduce_max_float(<4 x float>) nounwind readnone alwaysinline {
|
||||
reduce4(float, @__max_varying_float, @__max_uniform_float)
|
||||
}
|
||||
|
||||
define i32 @__reduce_add_int32(<4 x i32> %v) nounwind readnone {
|
||||
%v1 = shufflevector <4 x i32> %v, <4 x i32> undef,
|
||||
<4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
|
||||
%m1 = add <4 x i32> %v1, %v
|
||||
%m1a = extractelement <4 x i32> %m1, i32 0
|
||||
%m1b = extractelement <4 x i32> %m1, i32 1
|
||||
%sum = add i32 %m1a, %m1b
|
||||
ret i32 %sum
|
||||
}
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal double ops
|
||||
|
||||
define i32 @__reduce_min_int32(<4 x i32>) nounwind readnone {
|
||||
reduce4(i32, @__min_varying_int32, @__min_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_int32(<4 x i32>) nounwind readnone {
|
||||
reduce4(i32, @__max_varying_int32, @__max_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_add_uint32(<4 x i32> %v) nounwind readnone {
|
||||
%r = call i32 @__reduce_add_int32(<4 x i32> %v)
|
||||
ret i32 %r
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<4 x i32>) nounwind readnone {
|
||||
reduce4(i32, @__min_varying_uint32, @__min_uniform_uint32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_uint32(<4 x i32>) nounwind readnone {
|
||||
reduce4(i32, @__max_varying_uint32, @__max_uniform_uint32)
|
||||
}
|
||||
|
||||
|
||||
define double @__reduce_add_double(<4 x double>) nounwind readnone {
|
||||
define double @__reduce_add_double(<4 x double>) nounwind readnone alwaysinline {
|
||||
%v0 = shufflevector <4 x double> %0, <4 x double> undef,
|
||||
<2 x i32> <i32 0, i32 1>
|
||||
%v1 = shufflevector <4 x double> %0, <4 x double> undef,
|
||||
@@ -330,15 +292,85 @@ define double @__reduce_add_double(<4 x double>) nounwind readnone {
|
||||
ret double %m
|
||||
}
|
||||
|
||||
define double @__reduce_min_double(<4 x double>) nounwind readnone {
|
||||
define double @__reduce_min_double(<4 x double>) nounwind readnone alwaysinline {
|
||||
reduce4(double, @__min_varying_double, @__min_uniform_double)
|
||||
}
|
||||
|
||||
define double @__reduce_max_double(<4 x double>) nounwind readnone {
|
||||
define double @__reduce_max_double(<4 x double>) nounwind readnone alwaysinline {
|
||||
reduce4(double, @__max_varying_double, @__max_uniform_double)
|
||||
}
|
||||
|
||||
define i64 @__reduce_add_int64(<4 x i64>) nounwind readnone {
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int8 ops
|
||||
|
||||
declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
|
||||
|
||||
define i16 @__reduce_add_int8(<4 x i8>) nounwind readnone alwaysinline {
|
||||
%wide8 = shufflevector <4 x i8> %0, <4 x i8> zeroinitializer,
|
||||
<16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4,
|
||||
i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
|
||||
%rv = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %wide8,
|
||||
<16 x i8> zeroinitializer)
|
||||
%r0 = extractelement <2 x i64> %rv, i32 0
|
||||
%r1 = extractelement <2 x i64> %rv, i32 1
|
||||
%r = add i64 %r0, %r1
|
||||
%r16 = trunc i64 %r to i16
|
||||
ret i16 %r16
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int16 ops
|
||||
|
||||
define internal <4 x i16> @__add_varying_i16(<4 x i16>,
|
||||
<4 x i16>) nounwind readnone alwaysinline {
|
||||
%r = add <4 x i16> %0, %1
|
||||
ret <4 x i16> %r
|
||||
}
|
||||
|
||||
define internal i16 @__add_uniform_i16(i16, i16) nounwind readnone alwaysinline {
|
||||
%r = add i16 %0, %1
|
||||
ret i16 %r
|
||||
}
|
||||
|
||||
define i16 @__reduce_add_int16(<4 x i16>) nounwind readnone alwaysinline {
|
||||
reduce4(i16, @__add_varying_i16, @__add_uniform_i16)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int32 ops
|
||||
|
||||
;; reduction functions
|
||||
define i32 @__reduce_add_int32(<4 x i32> %v) nounwind readnone alwaysinline {
|
||||
%v1 = shufflevector <4 x i32> %v, <4 x i32> undef,
|
||||
<4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
|
||||
%m1 = add <4 x i32> %v1, %v
|
||||
%m1a = extractelement <4 x i32> %m1, i32 0
|
||||
%m1b = extractelement <4 x i32> %m1, i32 1
|
||||
%sum = add i32 %m1a, %m1b
|
||||
ret i32 %sum
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_int32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
reduce4(i32, @__min_varying_int32, @__min_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_int32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
reduce4(i32, @__max_varying_int32, @__max_uniform_int32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_min_uint32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
reduce4(i32, @__min_varying_uint32, @__min_uniform_uint32)
|
||||
}
|
||||
|
||||
define i32 @__reduce_max_uint32(<4 x i32>) nounwind readnone alwaysinline {
|
||||
reduce4(i32, @__max_varying_uint32, @__max_uniform_uint32)
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; horizontal int64 ops
|
||||
|
||||
;; reduction functions
|
||||
define i64 @__reduce_add_int64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
%v0 = shufflevector <4 x i64> %0, <4 x i64> undef,
|
||||
<2 x i32> <i32 0, i32 1>
|
||||
%v1 = shufflevector <4 x i64> %0, <4 x i64> undef,
|
||||
@@ -350,35 +382,58 @@ define i64 @__reduce_add_int64(<4 x i64>) nounwind readnone {
|
||||
ret i64 %m
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_int64(<4 x i64>) nounwind readnone {
|
||||
define i64 @__reduce_min_int64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__min_varying_int64, @__min_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_max_int64(<4 x i64>) nounwind readnone {
|
||||
define i64 @__reduce_max_int64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__max_varying_int64, @__max_uniform_int64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_min_uint64(<4 x i64>) nounwind readnone {
|
||||
define i64 @__reduce_min_uint64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__min_varying_uint64, @__min_uniform_uint64)
|
||||
}
|
||||
|
||||
define i64 @__reduce_max_uint64(<4 x i64>) nounwind readnone {
|
||||
define i64 @__reduce_max_uint64(<4 x i64>) nounwind readnone alwaysinline {
|
||||
reduce4(i64, @__max_varying_uint64, @__max_uniform_uint64)
|
||||
}
|
||||
|
||||
reduce_equal(4)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
|
||||
masked_load(i8, 1)
|
||||
masked_load(i16, 2)
|
||||
masked_load(i32, 4)
|
||||
masked_load(float, 4)
|
||||
masked_load(i64, 8)
|
||||
masked_load(double, 8)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store
|
||||
|
||||
gen_masked_store(i8)
|
||||
gen_masked_store(i16)
|
||||
gen_masked_store(i32)
|
||||
gen_masked_store(i64)
|
||||
|
||||
masked_store_float_double()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store blend
|
||||
|
||||
masked_store_blend_8_16_by_4()
|
||||
|
||||
declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>,
|
||||
<4 x float>) nounwind readnone
|
||||
|
||||
|
||||
define void @__masked_store_blend_32(<4 x i32>* nocapture, <4 x i32>,
|
||||
<4 x i32> %mask) nounwind alwaysinline {
|
||||
define void @__masked_store_blend_i32(<4 x i32>* nocapture, <4 x i32>,
|
||||
<4 x i32> %mask) nounwind alwaysinline {
|
||||
%mask_as_float = bitcast <4 x i32> %mask to <4 x float>
|
||||
%oldValue = load <4 x i32>* %0, align 4
|
||||
%oldValue = load PTR_OP_ARGS(`<4 x i32>') %0, align 4
|
||||
%oldAsFloat = bitcast <4 x i32> %oldValue to <4 x float>
|
||||
%newAsFloat = bitcast <4 x i32> %1 to <4 x float>
|
||||
%blend = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %oldAsFloat,
|
||||
@@ -390,9 +445,9 @@ define void @__masked_store_blend_32(<4 x i32>* nocapture, <4 x i32>,
|
||||
}
|
||||
|
||||
|
||||
define void @__masked_store_blend_64(<4 x i64>* nocapture %ptr, <4 x i64> %new,
|
||||
<4 x i32> %i32mask) nounwind alwaysinline {
|
||||
%oldValue = load <4 x i64>* %ptr, align 8
|
||||
define void @__masked_store_blend_i64(<4 x i64>* nocapture %ptr, <4 x i64> %new,
|
||||
<4 x i32> %i32mask) nounwind alwaysinline {
|
||||
%oldValue = load PTR_OP_ARGS(`<4 x i64>') %ptr, align 8
|
||||
%mask = bitcast <4 x i32> %i32mask to <4 x float>
|
||||
|
||||
; Do 4x64-bit blends by doing two <4 x i32> blends, where the <4 x i32> values
|
||||
@@ -437,40 +492,35 @@ define void @__masked_store_blend_64(<4 x i64>* nocapture %ptr, <4 x i64> %new,
|
||||
ret void
|
||||
}
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; masked store
|
||||
|
||||
masked_store_blend_8_16_by_4()
|
||||
|
||||
gen_masked_store(4, i8, 8)
|
||||
gen_masked_store(4, i16, 16)
|
||||
gen_masked_store(4, i32, 32)
|
||||
gen_masked_store(4, i64, 64)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; unaligned loads/loads+broadcasts
|
||||
|
||||
load_and_broadcast(4, i8, 8)
|
||||
load_and_broadcast(4, i16, 16)
|
||||
load_and_broadcast(4, i32, 32)
|
||||
load_and_broadcast(4, i64, 64)
|
||||
|
||||
masked_load(4, i8, 8, 1)
|
||||
masked_load(4, i16, 16, 2)
|
||||
masked_load(4, i32, 32, 4)
|
||||
masked_load(4, i64, 64, 8)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; gather/scatter
|
||||
|
||||
; define these with the macros from stdlib.m4
|
||||
|
||||
gen_gather(4, i8)
|
||||
gen_gather(4, i16)
|
||||
gen_gather(4, i32)
|
||||
gen_gather(4, i64)
|
||||
gen_gather_factored(i8)
|
||||
gen_gather_factored(i16)
|
||||
gen_gather_factored(i32)
|
||||
gen_gather_factored(float)
|
||||
gen_gather_factored(i64)
|
||||
gen_gather_factored(double)
|
||||
|
||||
gen_scatter(4, i8)
|
||||
gen_scatter(4, i16)
|
||||
gen_scatter(4, i32)
|
||||
gen_scatter(4, i64)
|
||||
gen_scatter(i8)
|
||||
gen_scatter(i16)
|
||||
gen_scatter(i32)
|
||||
gen_scatter(float)
|
||||
gen_scatter(i64)
|
||||
gen_scatter(double)
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; int8/int16 builtins
|
||||
|
||||
define_avgs()
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; reciprocals in double precision, if supported
|
||||
|
||||
rsqrtd_decl()
|
||||
rcpd_decl()
|
||||
|
||||
transcendetals_decl()
|
||||
trigonometry_decl()
|
||||
|
||||
3511
builtins/util-nvptx.m4
Normal file
3511
builtins/util-nvptx.m4
Normal file
File diff suppressed because it is too large
Load Diff
3612
builtins/util.m4
3612
builtins/util.m4
File diff suppressed because it is too large
Load Diff
4073
cbackend.cpp
4073
cbackend.cpp
File diff suppressed because it is too large
Load Diff
102
check_env.py
Executable file
102
check_env.py
Executable file
@@ -0,0 +1,102 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2013, Intel Corporation
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
#
|
||||
# * Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
#
|
||||
# * Neither the name of Intel Corporation nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
# // Author: Filippov Ilia
|
||||
|
||||
import common
|
||||
import sys
|
||||
import os
|
||||
import string
|
||||
print_debug = common.print_debug
|
||||
error = common.error
|
||||
take_lines = common.take_lines
|
||||
|
||||
exists = [False, False, False, False, False, False, False, False]
|
||||
names = ["m4", "bison", "flex", "sde", "ispc", "clang", "gcc", "icc"]
|
||||
|
||||
PATH_dir = string.split(os.getenv("PATH"), os.pathsep)
|
||||
for counter in PATH_dir:
|
||||
for i in range(0,8):
|
||||
if os.path.exists(counter + os.sep + names[i]):
|
||||
exists[i] = True
|
||||
|
||||
print_debug("=== in PATH: ===\n", False, "")
|
||||
print_debug("Tools:\n", False, "")
|
||||
for i in range(0,3):
|
||||
if exists[i]:
|
||||
print_debug(take_lines(names[i] + " --version", "first"), False, "")
|
||||
else:
|
||||
error("you don't have " + names[i], 0)
|
||||
if exists[0] and exists[1] and exists[2]:
|
||||
if common.check_tools(2):
|
||||
print_debug("Tools' versions are ok\n", False, "")
|
||||
print_debug("\nSDE:\n", False, "")
|
||||
if exists[3]:
|
||||
print_debug(take_lines(names[3] + " --version", "first"), False, "")
|
||||
else:
|
||||
error("you don't have " + names[3], 2)
|
||||
print_debug("\nISPC:\n", False, "")
|
||||
if exists[4]:
|
||||
print_debug(take_lines(names[4] + " --version", "first"), False, "")
|
||||
else:
|
||||
error("you don't have " + names[4], 2)
|
||||
print_debug("\nC/C++ compilers:\n", False, "")
|
||||
for i in range(5,8):
|
||||
if exists[i]:
|
||||
print_debug(take_lines(names[i] + " --version", "first"), False, "")
|
||||
else:
|
||||
error("you don't have " + names[i], 2)
|
||||
|
||||
print_debug("\n=== in ISPC specific environment variables: ===\n", False, "")
|
||||
if os.environ.get("LLVM_HOME") == None:
|
||||
error("you have no LLVM_HOME", 2)
|
||||
else:
|
||||
print_debug("Your LLVM_HOME:" + os.environ.get("LLVM_HOME") + "\n", False, "")
|
||||
if os.environ.get("ISPC_HOME") == None:
|
||||
error("you have no ISPC_HOME", 2)
|
||||
else:
|
||||
print_debug("Your ISPC_HOME:" + os.environ.get("ISPC_HOME") + "\n", False, "")
|
||||
if os.path.exists(os.environ.get("ISPC_HOME") + os.sep + "ispc"):
|
||||
print_debug("You have ISPC in your ISPC_HOME: " +
|
||||
take_lines(os.environ.get("ISPC_HOME") + os.sep + "ispc" + " --version", "first"), False, "")
|
||||
else:
|
||||
error("you don't have ISPC in your ISPC_HOME", 2)
|
||||
if os.environ.get("SDE_HOME") == None:
|
||||
error("You have no SDE_HOME", 2)
|
||||
else:
|
||||
print_debug("Your SDE_HOME:" + os.environ.get("SDE_HOME") + "\n", False, "")
|
||||
if os.path.exists(os.environ.get("SDE_HOME") + os.sep + "sde"):
|
||||
print_debug("You have sde in your SDE_HOME: " +
|
||||
take_lines(os.environ.get("SDE_HOME") + os.sep + "sde" + " --version", "first"), False, "")
|
||||
else:
|
||||
error("you don't have any SDE in your ISPC_HOME", 2)
|
||||
170
check_isa.cpp
Normal file
170
check_isa.cpp
Normal file
@@ -0,0 +1,170 @@
|
||||
/*
|
||||
Copyright (c) 2013-2015, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// //
|
||||
// This file is a standalone program, which detects the best supported ISA. //
|
||||
// //
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#if defined(_WIN32) || defined(_WIN64)
|
||||
#define ISPC_IS_WINDOWS
|
||||
#include <intrin.h>
|
||||
#endif
|
||||
|
||||
#if !defined (__arm__)
|
||||
#if !defined(ISPC_IS_WINDOWS)
|
||||
static void __cpuid(int info[4], int infoType) {
|
||||
__asm__ __volatile__ ("cpuid"
|
||||
: "=a" (info[0]), "=b" (info[1]), "=c" (info[2]), "=d" (info[3])
|
||||
: "0" (infoType));
|
||||
}
|
||||
|
||||
/* Save %ebx in case it's the PIC register */
|
||||
static void __cpuidex(int info[4], int level, int count) {
|
||||
__asm__ __volatile__ ("xchg{l}\t{%%}ebx, %1\n\t"
|
||||
"cpuid\n\t"
|
||||
"xchg{l}\t{%%}ebx, %1\n\t"
|
||||
: "=a" (info[0]), "=r" (info[1]), "=c" (info[2]), "=d" (info[3])
|
||||
: "0" (level), "2" (count));
|
||||
}
|
||||
#endif // !ISPC_IS_WINDOWS
|
||||
|
||||
static bool __os_has_avx_support() {
|
||||
#if defined(ISPC_IS_WINDOWS)
|
||||
// Check if the OS will save the YMM registers
|
||||
unsigned long long xcrFeatureMask = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
|
||||
return (xcrFeatureMask & 6) == 6;
|
||||
#else // !defined(ISPC_IS_WINDOWS)
|
||||
// Check xgetbv; this uses a .byte sequence instead of the instruction
|
||||
// directly because older assemblers do not include support for xgetbv and
|
||||
// there is no easy way to conditionally compile based on the assembler used.
|
||||
int rEAX, rEDX;
|
||||
__asm__ __volatile__ (".byte 0x0f, 0x01, 0xd0" : "=a" (rEAX), "=d" (rEDX) : "c" (0));
|
||||
return (rEAX & 6) == 6;
|
||||
#endif // !defined(ISPC_IS_WINDOWS)
|
||||
}
|
||||
|
||||
static bool __os_has_avx512_support() {
|
||||
#if defined(ISPC_IS_WINDOWS)
|
||||
// Check if the OS saves the XMM, YMM and ZMM registers, i.e. it supports AVX2 and AVX512.
|
||||
// See section 2.1 of software.intel.com/sites/default/files/managed/0d/53/319433-022.pdf
|
||||
unsigned long long xcrFeatureMask = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
|
||||
return (xcrFeatureMask & 0xE6) == 0xE6;
|
||||
#else // !defined(ISPC_IS_WINDOWS)
|
||||
// Check xgetbv; this uses a .byte sequence instead of the instruction
|
||||
// directly because older assemblers do not include support for xgetbv and
|
||||
// there is no easy way to conditionally compile based on the assembler used.
|
||||
int rEAX, rEDX;
|
||||
__asm__ __volatile__ (".byte 0x0f, 0x01, 0xd0" : "=a" (rEAX), "=d" (rEDX) : "c" (0));
|
||||
return (rEAX & 0xE6) == 0xE6;
|
||||
#endif // !defined(ISPC_IS_WINDOWS)
|
||||
}
|
||||
#endif // !__arm__
|
||||
|
||||
|
||||
static const char *
|
||||
lGetSystemISA() {
|
||||
#ifdef __arm__
|
||||
return "ARM NEON";
|
||||
#else
|
||||
int info[4];
|
||||
__cpuid(info, 1);
|
||||
|
||||
int info2[4];
|
||||
// Call cpuid with eax=7, ecx=0
|
||||
__cpuidex(info2, 7, 0);
|
||||
|
||||
if ((info[2] & (1 << 27)) != 0 && // OSXSAVE
|
||||
(info2[1] & (1 << 5)) != 0 && // AVX2
|
||||
(info2[1] & (1 << 16)) != 0 && // AVX512 F
|
||||
__os_has_avx512_support()) {
|
||||
// We need to verify that AVX2 is also available,
|
||||
// as well as AVX512, because our targets are supposed
|
||||
// to use both.
|
||||
|
||||
if ((info2[1] & (1 << 17)) != 0 && // AVX512 DQ
|
||||
(info2[1] & (1 << 28)) != 0 && // AVX512 CDI
|
||||
(info2[1] & (1 << 30)) != 0 && // AVX512 BW
|
||||
(info2[1] & (1 << 31)) != 0) { // AVX512 VL
|
||||
return "SKX";
|
||||
}
|
||||
else if ((info2[1] & (1 << 26)) != 0 && // AVX512 PF
|
||||
(info2[1] & (1 << 27)) != 0 && // AVX512 ER
|
||||
(info2[1] & (1 << 28)) != 0) { // AVX512 CDI
|
||||
return "KNL";
|
||||
}
|
||||
// If it's unknown AVX512 target, fall through and use AVX2
|
||||
// or whatever is available in the machine.
|
||||
}
|
||||
|
||||
if ((info[2] & (1 << 27)) != 0 && // OSXSAVE
|
||||
(info[2] & (1 << 28)) != 0 &&
|
||||
__os_has_avx_support()) { // AVX
|
||||
// AVX1 for sure....
|
||||
// Ivy Bridge?
|
||||
if ((info[2] & (1 << 29)) != 0 && // F16C
|
||||
(info[2] & (1 << 30)) != 0) { // RDRAND
|
||||
// So far, so good. AVX2?
|
||||
if ((info2[1] & (1 << 5)) != 0) {
|
||||
return "AVX2 (codename Haswell)";
|
||||
}
|
||||
else {
|
||||
return "AVX1.1 (codename Ivy Bridge)";
|
||||
}
|
||||
}
|
||||
// Regular AVX
|
||||
return "AVX (codename Sandy Bridge)";
|
||||
}
|
||||
else if ((info[2] & (1 << 19)) != 0) {
|
||||
return "SSE4";
|
||||
}
|
||||
else if ((info[3] & (1 << 26)) != 0) {
|
||||
return "SSE2";
|
||||
}
|
||||
else {
|
||||
return "Error";
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
int main () {
|
||||
const char* isa = lGetSystemISA();
|
||||
printf("ISA: %s\n", isa);
|
||||
|
||||
return 0;
|
||||
}
|
||||
502
common.py
Executable file
502
common.py
Executable file
@@ -0,0 +1,502 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2013, Intel Corporation
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
#
|
||||
# * Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
#
|
||||
# * Neither the name of Intel Corporation nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
# // Author: Filippov Ilia, Anton Mitrokhin, Vsevolod Livinskiy
|
||||
import sys
|
||||
import os
|
||||
import errno
|
||||
import shutil
|
||||
|
||||
# generic empty class
|
||||
class EmptyClass(object): pass
|
||||
|
||||
# load/save almost every object to a file (good for bug reproducing)
|
||||
def dump(fname, obj):
|
||||
import pickle
|
||||
with open(fname, 'w') as fp:
|
||||
pickle.dump(obj, fp)
|
||||
|
||||
def undump(fname):
|
||||
import pickle
|
||||
with open(fname, 'r') as fp:
|
||||
obj = pickle.load(fp)
|
||||
return obj
|
||||
|
||||
# retrieve the host name
|
||||
def get_host_name():
|
||||
import socket
|
||||
return socket.gethostname()
|
||||
|
||||
def write_to_file(filename, line):
|
||||
f = open(filename, 'a')
|
||||
f.writelines(line)
|
||||
f.close()
|
||||
|
||||
# remove file if it exists
|
||||
def remove_if_exists(filename):
|
||||
if os.path.exists(filename):
|
||||
if os.path.isdir(filename):
|
||||
shutil.rmtree(filename)
|
||||
else:
|
||||
os.remove(filename)
|
||||
|
||||
def make_sure_dir_exists(path):
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError as exception:
|
||||
if exception.errno != errno.EEXIST:
|
||||
raise
|
||||
|
||||
|
||||
# detect version which is printed after command
|
||||
def take_lines(command, which):
|
||||
os.system(command + " > " + "temp_detect_version")
|
||||
version = open("temp_detect_version")
|
||||
if which == "first":
|
||||
answer = version.readline()
|
||||
if which == "all":
|
||||
answer = version.readlines()
|
||||
version.close()
|
||||
remove_if_exists("temp_detect_version")
|
||||
return answer
|
||||
|
||||
# print versions of compilers
|
||||
def print_version(ispc_test, ispc_ref, ref_compiler, s, perf_log, is_windows):
|
||||
print_debug("\nUsing test compiler: " + take_lines(ispc_test + " --version", "first"), s, perf_log)
|
||||
if ispc_ref != "":
|
||||
print_debug("Using ref compiler: " + take_lines(ispc_ref + " --version", "first"), s, perf_log)
|
||||
if is_windows == False:
|
||||
temp1 = take_lines(ref_compiler + " --version", "first")
|
||||
else:
|
||||
os.system(ref_compiler + " 2>&1" + " 2> temp_detect_version > temp_detect_version1" )
|
||||
version = open("temp_detect_version")
|
||||
temp1 = version.readline()
|
||||
version.close()
|
||||
remove_if_exists("temp_detect_version")
|
||||
remove_if_exists("temp_detect_version1")
|
||||
print_debug("Using C/C++ compiler: " + temp1 + "\n", s, perf_log)
|
||||
|
||||
# print everything from scripts instead errors
|
||||
def print_debug(line, silent, filename):
|
||||
if silent == False:
|
||||
sys.stdout.write(line)
|
||||
sys.stdout.flush()
|
||||
if os.environ.get("ISPC_HOME") != None:
|
||||
if os.path.exists(os.environ.get("ISPC_HOME")):
|
||||
write_to_file(os.environ["ISPC_HOME"] + os.sep + "notify_log.log", line)
|
||||
if filename != "":
|
||||
write_to_file(filename, line)
|
||||
|
||||
# print errors from scripts
|
||||
# type 1 for error in environment
|
||||
# type 2 for warning
|
||||
# type 3 for error of compiler or test which isn't the goal of script
|
||||
def error(line, error_type):
|
||||
line = line + "\n"
|
||||
if error_type == 1:
|
||||
sys.stderr.write("Fatal error: " + line)
|
||||
sys.exit(1)
|
||||
if error_type == 2:
|
||||
sys.stderr.write("Warning: " + line)
|
||||
if error_type == 0:
|
||||
print_debug("FIND ERROR: " + line, False, "")
|
||||
|
||||
def check_tools(m):
|
||||
input_tools=[[[1,4],"m4 --version", "bad m4 version"],
|
||||
[[2,4],"bison --version", "bad bison version"],
|
||||
[[2,5], "flex --version", "bad flex version"]]
|
||||
ret = 1
|
||||
for t in range(0,len(input_tools)):
|
||||
t1 = ((take_lines(input_tools[t][1], "first"))[:-1].split(" "))
|
||||
for i in range(0,len(t1)):
|
||||
t11 = t1[i].split(".")
|
||||
f = True
|
||||
for j in range(0,len(t11)):
|
||||
if not t11[j].isdigit():
|
||||
f = False
|
||||
if f == True:
|
||||
for j in range(0,len(t11)):
|
||||
if j < len(input_tools[t][0]):
|
||||
if int(t11[j])<input_tools[t][0][j]:
|
||||
error(input_tools[t][2], m)
|
||||
ret = 0
|
||||
break
|
||||
if int(t11[j])>input_tools[t][0][j]:
|
||||
break
|
||||
return ret
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# regression testing functionality
|
||||
class TestResult(object):
|
||||
"""
|
||||
this class stores basicly two integers which stand for the result
|
||||
of the test: (runfail{0/1}, compfail{0/1}). other values are
|
||||
deemed invalid. the __cmp__ function of this class is used to
|
||||
define what test regression actually is.
|
||||
"""
|
||||
def __init__(self, runfailed, compfailed):
|
||||
self.runfailed, self.compfailed = (runfailed, compfailed)
|
||||
|
||||
def __cmp__(self, other):
|
||||
if isinstance(other, TestResult):
|
||||
if self.runfailed == other.runfailed and \
|
||||
self.compfailed == other.compfailed:
|
||||
return 0
|
||||
elif self.compfailed > other.compfailed:
|
||||
return 1
|
||||
elif self.runfailed > other.runfailed and \
|
||||
self.compfailed == other.compfailed:
|
||||
return 1
|
||||
else:
|
||||
return -1
|
||||
|
||||
raise RuntimeError("Wrong type for comparioson")
|
||||
return NotImplemented
|
||||
|
||||
def __repr__(self):
|
||||
if (self.runfailed < 0 or self.compfailed < 0):
|
||||
return "(Undefined)"
|
||||
return "(r%d c%d)" % (self.runfailed, self.compfailed)
|
||||
|
||||
|
||||
class TestCase(object):
|
||||
"""
|
||||
the TestCase() is a combination of parameters the tast was run with:
|
||||
the architecture (x86, x86-64 ...), compiler optimization (-O0, -O2 ...)
|
||||
and target (sse, avx ...). we also store the result of the test here.
|
||||
"""
|
||||
def __init__(self, arch, opt, target):
|
||||
self.arch, self.opt, self.target = (arch, opt, target)
|
||||
self.result = TestResult(-1, -1)
|
||||
|
||||
def __repr__(self):
|
||||
string = "%s %s %s: " % (self.arch, self.opt, self.target)
|
||||
string = string + repr(self.result)
|
||||
return string
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.arch + self.opt + self.target)
|
||||
|
||||
def __ne__(self, other):
|
||||
if isinstance(other, TestCase):
|
||||
if hash(self.arch + self.opt + self.target) != hash(other):
|
||||
return True
|
||||
return False
|
||||
raise RuntimeError("Wrong type for comparioson")
|
||||
return NotImplemented
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, TestCase):
|
||||
return not self.__ne__(other)
|
||||
raise RuntimeError("Wrong type for comparioson")
|
||||
return NotImplemented
|
||||
|
||||
|
||||
class Test(object):
|
||||
"""
|
||||
Test() stores all TestCase() objects for a given test file name
|
||||
i.e. all archs/opts/targets/ and corresponding testing results.
|
||||
"""
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
self.test_cases = []
|
||||
|
||||
def add_result(self, test_case):
|
||||
if test_case in self.test_cases:
|
||||
raise RuntimeError("This test case is already in the list: " + repr(test_case))
|
||||
return
|
||||
self.test_cases.append(test_case)
|
||||
|
||||
def __repr__(self):
|
||||
string = self.name + '\n'
|
||||
string = string.rjust(20)
|
||||
for test_case in self.test_cases:
|
||||
string += repr(test_case).rjust(60) + '\n'
|
||||
return string
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.name)
|
||||
|
||||
def __ne__(self, other):
|
||||
if isinstance(other, Test):
|
||||
if hash(self) != hash(other):
|
||||
return True
|
||||
return False
|
||||
return NotImplemented
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, Test):
|
||||
return not self.__ne__(other)
|
||||
return NotImplemented
|
||||
|
||||
|
||||
class RegressionInfo(object):
|
||||
"""
|
||||
service class which provides some statistics on a given regression.
|
||||
the regression test names and cases are given in a form of Test() objects
|
||||
with empty (-1, -1) results
|
||||
"""
|
||||
def __init__(self, revision_old, revision_new, tests):
|
||||
self.revision_old, self.revision_new = (revision_old, revision_new)
|
||||
self.tests = tests
|
||||
self.archfailes = {}
|
||||
self.optfails = {}
|
||||
self.targetfails = {}
|
||||
self.testfails = {}
|
||||
self.archs = []
|
||||
self.opts = []
|
||||
self.targets = []
|
||||
|
||||
for test in tests:
|
||||
for test_case in test.test_cases:
|
||||
self.inc_dictionary(self.testfails, test.name)
|
||||
self.inc_dictionary(self.archfailes, test_case.arch)
|
||||
self.inc_dictionary(self.optfails, test_case.opt)
|
||||
self.inc_dictionary(self.targetfails, test_case.target)
|
||||
|
||||
self.archs = self.archfailes.keys()
|
||||
self.opts = self.optfails.keys()
|
||||
self.targets = self.targetfails.keys()
|
||||
|
||||
def inc_dictionary(self, dictionary, key):
|
||||
if key not in dictionary:
|
||||
dictionary[key] = 0
|
||||
dictionary[key] += 1
|
||||
|
||||
def __repr__(self):
|
||||
string = "Regression of LLVM revision %s in comparison to %s\n" % (self.revision_new, self.revision_old)
|
||||
string += repr(self.tests) + '\n'
|
||||
string += str(self.testfails) + '\n'
|
||||
string += str(self.archfailes) + '\n'
|
||||
string += str(self.optfails) + '\n'
|
||||
string += str(self.targetfails) + '\n'
|
||||
|
||||
return string
|
||||
|
||||
|
||||
class TestTable(object):
|
||||
"""
|
||||
the table which stores a tuple of Test() objects (one per revision) and has some
|
||||
convenience methods for dealing with them
|
||||
"""
|
||||
def __init__(self):
|
||||
""" This dictionary contains {rev: [test1, test2, ...], ...}, where 'rev' is a string (revision name) and 'test#'
|
||||
is a Test() object instance """
|
||||
self.table = {}
|
||||
|
||||
def add_result(self, revision_name, test_name, arch, opt, target, runfailed, compfailed):
|
||||
revision_name = str(revision_name)
|
||||
if revision_name not in self.table:
|
||||
self.table[revision_name] = []
|
||||
|
||||
test_case = TestCase(arch, opt, target)
|
||||
test_case.result = TestResult(runfailed, compfailed)
|
||||
|
||||
for test in self.table[revision_name]:
|
||||
if test.name == test_name:
|
||||
test.add_result(test_case)
|
||||
return
|
||||
|
||||
test = Test(test_name)
|
||||
test.add_result(test_case)
|
||||
self.table[revision_name].append(test)
|
||||
|
||||
def test_intersection(self, test1, test2):
|
||||
""" Return test cases common for test1 and test2. If test names are different than there is nothing in common """
|
||||
if test1.name != test2.name:
|
||||
return []
|
||||
return list(set(test1.test_cases) & set(test2.test_cases))
|
||||
|
||||
def test_regression(self, test1, test2):
|
||||
""" Return the tuple of empty (i.e. with undefined results) TestCase() objects
|
||||
corresponding to regression in test2 comparing to test1 """
|
||||
if test1.name != test2.name:
|
||||
return []
|
||||
|
||||
regressed = []
|
||||
for tc1 in test1.test_cases:
|
||||
for tc2 in test2.test_cases:
|
||||
""" If test cases are equal (same arch, opt and target) but tc2 has more runfails or compfails """
|
||||
if tc1 == tc2 and tc1.result < tc2.result:
|
||||
regressed.append(TestCase(tc1.arch, tc1.opt, tc1.target))
|
||||
return regressed
|
||||
|
||||
def regression(self, revision_old, revision_new):
|
||||
""" Return a tuple of Test() objects containing TestCase() object which show regression along given revisions """
|
||||
revision_old, revision_new = (str(revision_old), str(revision_new))
|
||||
if revision_new not in self.table:
|
||||
raise RuntimeError("This revision in not in the database: " + str(revision_new) + " (" + str(self.table.keys()) + ")")
|
||||
return
|
||||
|
||||
if revision_old not in self.table:
|
||||
raise RuntimeError("This revision in not in the database: " + str(revision_old) + " (" + str(self.table.keys()) + ")")
|
||||
return
|
||||
|
||||
regressed = []
|
||||
for test_old in self.table[revision_old]:
|
||||
for test_new in self.table[revision_new]:
|
||||
tr = self.test_regression(test_old, test_new)
|
||||
if len(tr) == 0:
|
||||
continue
|
||||
test = Test(test_new.name)
|
||||
for test_case in tr:
|
||||
test.add_result(test_case)
|
||||
regressed.append(test)
|
||||
return RegressionInfo(revision_old, revision_new, regressed)
|
||||
|
||||
def __repr__(self):
|
||||
string = ""
|
||||
for rev in self.table.keys():
|
||||
string += "[" + rev + "]:\n"
|
||||
for test in self.table[rev]:
|
||||
string += repr(test) + '\n'
|
||||
return string
|
||||
|
||||
|
||||
class RevisionInfo(object):
|
||||
"""
|
||||
this class is intended to store some relevant information about curent LLVM revision
|
||||
"""
|
||||
def __init__(self, hostname, revision):
|
||||
self.hostname, self.revision = hostname, revision
|
||||
self.archs = []
|
||||
self.opts = []
|
||||
self.targets = []
|
||||
self.succeed = 0
|
||||
self.runfailed = 0
|
||||
self.compfailed = 0
|
||||
self.skipped = 0
|
||||
self.testall = 0
|
||||
self.regressions = {}
|
||||
|
||||
def register_test(self, arch, opt, target, succeed, runfailed, compfailed, skipped):
|
||||
if arch not in self.archs:
|
||||
self.archs.append(arch)
|
||||
if opt not in self.opts:
|
||||
self.opts.append(opt)
|
||||
if target not in self.targets:
|
||||
self.targets.append(target)
|
||||
self.runfailed += runfailed
|
||||
self.compfailed += compfailed
|
||||
self.skipped += skipped
|
||||
self.succeed += succeed
|
||||
|
||||
def add_regression(self, revision, regression_info):
|
||||
""" input is intended to be from 'TestTable.regression(..)', 'regression_info' is a tuple of RegressionInfo() object
|
||||
(regression.py) and 'revision' is tested (not current) LLVM revision name """
|
||||
if revision == self.revision:
|
||||
raise RuntimeError("No regression can be found along the same LLVM revision!")
|
||||
|
||||
if revision in self.regressions:
|
||||
raise RuntimeError("This revision regression info is already in self.regressions!")
|
||||
|
||||
self.regressions[revision] = regression_info
|
||||
|
||||
def __repr__(self):
|
||||
string = "%s: LLVM(%s)\n" % (self.hostname, self.revision)
|
||||
string += "archs : %s\n" % (str(self.archs))
|
||||
string += "opts : %s\n" % (str(self.opts))
|
||||
string += "targets: %s\n" % (str(self.targets))
|
||||
string += "runfails: %d/%d\n" % (self.runfailed, self.testall)
|
||||
string += "compfails: %d/%d\n" % (self.compfailed, self.testall)
|
||||
string += "skipped: %d/%d\n" % (self.skipped, self.testall)
|
||||
string += "succeed: %d/%d\n" % (self.succeed, self.testall)
|
||||
return string
|
||||
|
||||
|
||||
class ExecutionStateGatherer(object):
|
||||
def __init__(self):
|
||||
self.hostname = self.get_host_name()
|
||||
self.revision = ""
|
||||
self.rinf = []
|
||||
self.tt = TestTable()
|
||||
self.switch_revision("undefined")
|
||||
|
||||
def switch_revision(self, revision):
|
||||
self.revision = revision
|
||||
self.rinf.append(RevisionInfo(self.hostname, self.revision))
|
||||
|
||||
def current_rinf(self):
|
||||
if len(self.rinf) == 0:
|
||||
raise RuntimeError("self.rinf is empty. Apparently you've never invoked switch_revision")
|
||||
return self.rinf[len(self.rinf) - 1]
|
||||
|
||||
def add_to_tt(self, test_name, arch, opt, target, runfailed, compfailed):
|
||||
if len(self.rinf) == 0:
|
||||
raise RuntimeError("self.rinf is empty. Apparently you've never invoked switch_revision")
|
||||
self.tt.add_result(self.revision, test_name, arch, opt, target, runfailed, compfailed)
|
||||
|
||||
def add_to_rinf(self, arch, opt, target, succeed, runfailed, compfailed, skipped):
|
||||
self.current_rinf().register_test(arch, opt, target, succeed, runfailed, compfailed, skipped)
|
||||
|
||||
def add_to_rinf_testall(self, tried_to_test):
|
||||
self.current_rinf().testall += tried_to_test
|
||||
|
||||
def load_from_tt(self, tt):
|
||||
# TODO: fill in self.rinf field!
|
||||
self.tt = tt
|
||||
REVISIONS = tt.table.keys()
|
||||
self.revision = ""
|
||||
if len(REVISIONS) != 0:
|
||||
self.revision = REVISIONS[0]
|
||||
print "ESG: loaded from 'TestTable()' with revisions", REVISIONS
|
||||
|
||||
def dump(self, fname, obj):
|
||||
import pickle
|
||||
with open(fname, 'w') as fp:
|
||||
pickle.dump(obj, fp)
|
||||
|
||||
def undump(self, fname):
|
||||
import pickle
|
||||
with open(fname, 'r') as fp:
|
||||
obj = pickle.load(fp)
|
||||
return obj
|
||||
|
||||
def get_host_name(self):
|
||||
import socket
|
||||
return socket.gethostname()
|
||||
|
||||
def __repr__(self):
|
||||
string = "Hostname: %s\n" % (self.hostname)
|
||||
string += "Current LLVM Revision = %s\n\n" % (self.revision)
|
||||
for rev_info in self.rinf:
|
||||
string += repr(rev_info) + '\n'
|
||||
return string
|
||||
|
||||
|
||||
# this class instance is intended to gather and store all information
|
||||
# regarding the testing process.
|
||||
ex_state = ExecutionStateGatherer()
|
||||
@@ -1,7 +1,7 @@
|
||||
" Vim syntax file
|
||||
" Language: ISPC
|
||||
" Maintainer: Andreas Wendleder <andreas.wendleder@gmail.com>
|
||||
" Last Change: 2011 Aug 3
|
||||
" Last Change: 2016 May 04
|
||||
|
||||
" Quit when a syntax file was already loaded
|
||||
if exists("b:current_syntax")
|
||||
@@ -13,11 +13,19 @@ runtime! syntax/c.vim
|
||||
unlet b:current_syntax
|
||||
|
||||
" New keywords
|
||||
syn keyword ispcStatement cbreak ccontinue creturn launch print reference soa sync task
|
||||
syn keyword ispcStatement cbreak ccontinue creturn launch print reference soa sync
|
||||
syn keyword ispcConditional cif
|
||||
syn keyword ispcRepeat cdo cfor cwhile
|
||||
syn keyword ispcBuiltin programCount programIndex
|
||||
syn keyword ispcType export int8 int16 int32 int64
|
||||
syn keyword ispcRepeat cdo cfor cwhile foreach foreach_tiled foreach_unique foreach_active
|
||||
syn keyword ispcBuiltin programCount programIndex taskCount taskCount0 taskCount1 taskCount3 taskIndex taskIndex0 taskIndex1 taskIndex2
|
||||
syn keyword ispcType export uniform varying int8 int16 int32 int64 task new delete
|
||||
syn keyword ispcOperator operator
|
||||
|
||||
"double precision floating point number, with dot, optional exponent
|
||||
syn match cFloat display contained "\d\+\.\d*d[-+]\=\d*\>"
|
||||
"double precision floating point number, starting with dot, optional exponent
|
||||
syn match cFloat display contained ".\d*d[-+]\=\d*\>"
|
||||
"double precision floating point number, without dot, with exponent
|
||||
syn match cFloat display contained "\d\+d[-+]\=\d\+\>"
|
||||
|
||||
" Default highlighting
|
||||
command -nargs=+ HiLink hi def link <args>
|
||||
@@ -26,6 +34,7 @@ HiLink ispcConditional Conditional
|
||||
HiLink ispcRepeat Repeat
|
||||
HiLink ispcBuiltin Statement
|
||||
HiLink ispcType Type
|
||||
HiLink ispcOperator Operator
|
||||
delcommand HiLink
|
||||
|
||||
let b:current_syntax = "ispc"
|
||||
|
||||
8
contrib/ispc.vim.README
Normal file
8
contrib/ispc.vim.README
Normal file
@@ -0,0 +1,8 @@
|
||||
To install vim syntax highlighting for ispc files:
|
||||
|
||||
1) Copy ispc.vim into ~/.vim/syntax/ispc.vim (create if necessary)
|
||||
2) Create a filetype for ispc files to correspond to that syntax file
|
||||
To do this, create and append the following line to ~/.vim/ftdetect/ispc.vim
|
||||
|
||||
au BufRead,BufNewFile *.ispc set filetype=ispc
|
||||
|
||||
313
ctx.h
313
ctx.h
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2010-2011, Intel Corporation
|
||||
Copyright (c) 2010-2015, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -28,11 +28,11 @@
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/** @file ctx.h
|
||||
@brief Declaration of the FunctionEmitContext class
|
||||
@brief %Declaration of the FunctionEmitContext class
|
||||
*/
|
||||
|
||||
#ifndef ISPC_CTX_H
|
||||
@@ -40,10 +40,20 @@
|
||||
|
||||
#include "ispc.h"
|
||||
#include <map>
|
||||
#include <llvm/InstrTypes.h>
|
||||
#include <llvm/Instructions.h>
|
||||
#include <llvm/Analysis/DIBuilder.h>
|
||||
#include <llvm/Analysis/DebugInfo.h>
|
||||
#if ISPC_LLVM_VERSION == ISPC_LLVM_3_2
|
||||
#include <llvm/InstrTypes.h>
|
||||
#include <llvm/Instructions.h>
|
||||
#else // 3.3+
|
||||
#include <llvm/IR/InstrTypes.h>
|
||||
#include <llvm/IR/Instructions.h>
|
||||
#endif
|
||||
#if ISPC_LLVM_VERSION <= ISPC_LLVM_3_4
|
||||
#include <llvm/DebugInfo.h>
|
||||
#include <llvm/DIBuilder.h>
|
||||
#else // 3.5+
|
||||
#include <llvm/IR/DebugInfo.h>
|
||||
#include <llvm/IR/DIBuilder.h>
|
||||
#endif
|
||||
|
||||
struct CFInfo;
|
||||
|
||||
@@ -65,7 +75,7 @@ public:
|
||||
@param firstStmtPos Source file position of the first statement in the
|
||||
function
|
||||
*/
|
||||
FunctionEmitContext(Function *function, Symbol *funSym,
|
||||
FunctionEmitContext(Function *function, Symbol *funSym,
|
||||
llvm::Function *llvmFunction,
|
||||
SourcePos firstStmtPos);
|
||||
~FunctionEmitContext();
|
||||
@@ -77,9 +87,9 @@ public:
|
||||
/** @name Current basic block management
|
||||
@{
|
||||
*/
|
||||
/** Returns the current basic block pointer */
|
||||
/** Returns the current basic block pointer */
|
||||
llvm::BasicBlock *GetCurrentBasicBlock();
|
||||
|
||||
|
||||
/** Set the given llvm::BasicBlock to be the basic block to emit
|
||||
forthcoming instructions into. */
|
||||
void SetCurrentBasicBlock(llvm::BasicBlock *bblock);
|
||||
@@ -87,7 +97,7 @@ public:
|
||||
/** @name Mask management
|
||||
@{
|
||||
*/
|
||||
/** Returns the mask value at entry to the current function. */
|
||||
/** Returns the mask value at entry to the current function. */
|
||||
llvm::Value *GetFunctionMask();
|
||||
|
||||
/** Returns the mask value corresponding to "varying" control flow
|
||||
@@ -96,7 +106,7 @@ public:
|
||||
llvm::Value *GetInternalMask();
|
||||
|
||||
/** Returns the complete current mask value--i.e. the logical AND of
|
||||
the function entry mask and the internal mask. */
|
||||
the function entry mask and the internal mask. */
|
||||
llvm::Value *GetFullMask();
|
||||
|
||||
/** Returns a pointer to storage in memory that stores the current full
|
||||
@@ -149,22 +159,21 @@ public:
|
||||
'continue' statements should jump to (if all running lanes want to
|
||||
break or continue), uniformControlFlow indicates whether the loop
|
||||
condition is 'uniform'. */
|
||||
void StartLoop(llvm::BasicBlock *breakTarget, llvm::BasicBlock *continueTarget,
|
||||
void StartLoop(llvm::BasicBlock *breakTarget, llvm::BasicBlock *continueTarget,
|
||||
bool uniformControlFlow);
|
||||
|
||||
/** Informs FunctionEmitContext of the value of the mask at the start
|
||||
of a loop body. */
|
||||
void SetLoopMask(llvm::Value *mask);
|
||||
of a loop body or switch statement. */
|
||||
void SetBlockEntryMask(llvm::Value *mask);
|
||||
|
||||
/** Informs FunctionEmitContext that code generation for a loop is
|
||||
finished. */
|
||||
void EndLoop();
|
||||
|
||||
/** Indicates that code generation for a 'foreach' or 'foreach_tiled'
|
||||
loop is about to start. The provided basic block pointer indicates
|
||||
where control flow should go if a 'continue' statement is executed
|
||||
in the loop. */
|
||||
void StartForeach(llvm::BasicBlock *continueTarget);
|
||||
/** Indicates that code generation for a 'foreach', 'foreach_tiled',
|
||||
'foreach_active', or 'foreach_unique' loop is about to start. */
|
||||
enum ForeachType { FOREACH_REGULAR, FOREACH_ACTIVE, FOREACH_UNIQUE };
|
||||
void StartForeach(ForeachType ft);
|
||||
void EndForeach();
|
||||
|
||||
/** Emit code for a 'break' statement in a loop. If doCoherenceCheck
|
||||
@@ -186,6 +195,52 @@ public:
|
||||
'continue' statement when going through the loop body in the
|
||||
previous iteration. */
|
||||
void RestoreContinuedLanes();
|
||||
|
||||
/** This method is called by code emitting IR for a loop. It clears
|
||||
any lanes that contained a break since the mask has been updated to take
|
||||
them into account. This is necessary as all the bail out checks for
|
||||
breaks are meant to only deal with lanes breaking on the current iteration.
|
||||
*/
|
||||
void ClearBreakLanes();
|
||||
|
||||
/** Indicates that code generation for a "switch" statement is about to
|
||||
start. isUniform indicates whether the "switch" value is uniform,
|
||||
and bbAfterSwitch gives the basic block immediately following the
|
||||
"switch" statement. (For example, if the switch condition is
|
||||
uniform, we jump here upon executing a "break" statement.) */
|
||||
void StartSwitch(bool isUniform, llvm::BasicBlock *bbAfterSwitch);
|
||||
/** Indicates the end of code generation for a "switch" statement. */
|
||||
void EndSwitch();
|
||||
|
||||
/** Emits code for a "switch" statement in the program.
|
||||
@param expr Gives the value of the expression after the "switch"
|
||||
@param defaultBlock Basic block to execute for the "default" case. This
|
||||
should be NULL if there is no "default" label inside
|
||||
the switch.
|
||||
@param caseBlocks vector that stores the mapping from label values
|
||||
after "case" statements to basic blocks corresponding
|
||||
to the "case" labels.
|
||||
@param nextBlocks For each basic block for a "case" or "default"
|
||||
label, this gives the basic block for the
|
||||
immediately-following "case" or "default" label (or
|
||||
the basic block after the "switch" statement for the
|
||||
last label.)
|
||||
*/
|
||||
void SwitchInst(llvm::Value *expr, llvm::BasicBlock *defaultBlock,
|
||||
const std::vector<std::pair<int, llvm::BasicBlock *> > &caseBlocks,
|
||||
const std::map<llvm::BasicBlock *, llvm::BasicBlock *> &nextBlocks);
|
||||
|
||||
/** Generates code for a "default" label after a "switch" statement.
|
||||
The checkMask parameter indicates whether additional code should be
|
||||
generated to check to see if the execution mask is all off after
|
||||
the default label (in which case a jump to the following label will
|
||||
be issued. */
|
||||
void EmitDefaultLabel(bool checkMask, SourcePos pos);
|
||||
|
||||
/** Generates code for a "case" label after a "switch" statement. See
|
||||
the documentation for EmitDefaultLabel() for discussion of the
|
||||
checkMask parameter. */
|
||||
void EmitCaseLabel(int value, bool checkMask, SourcePos pos);
|
||||
|
||||
/** Returns the current number of nested levels of 'varying' control
|
||||
flow */
|
||||
@@ -193,6 +248,15 @@ public:
|
||||
|
||||
bool InForeachLoop() const;
|
||||
|
||||
/** Temporarily disables emission of performance warnings from gathers
|
||||
and scatters from subsequent code. */
|
||||
void DisableGatherScatterWarnings();
|
||||
|
||||
/** Reenables emission of gather/scatter performance warnings. */
|
||||
void EnableGatherScatterWarnings();
|
||||
|
||||
void SetContinueTarget(llvm::BasicBlock *bb) { continueTarget = bb; }
|
||||
|
||||
/** Step through the code and find label statements; create a basic
|
||||
block for each one, so that subsequent calls to
|
||||
GetLabeledBasicBlock() return the corresponding basic block. */
|
||||
@@ -202,6 +266,10 @@ public:
|
||||
new basic block that it starts. */
|
||||
llvm::BasicBlock *GetLabeledBasicBlock(const std::string &label);
|
||||
|
||||
/** Returns a vector of all labels in the context. This is
|
||||
simply the key set of the labelMap */
|
||||
std::vector<std::string> GetLabels();
|
||||
|
||||
/** Called to generate code for 'return' statement; value is the
|
||||
expression in the return statement (if non-NULL), and
|
||||
doCoherenceCheck indicates whether instructions should be generated
|
||||
@@ -211,7 +279,7 @@ public:
|
||||
/** @} */
|
||||
|
||||
/** @name Small helper/utility routines
|
||||
@{
|
||||
@{
|
||||
*/
|
||||
/** Given a boolean mask value of type LLVMTypes::MaskType, return an
|
||||
i1 value that indicates if any of the mask lanes are on. */
|
||||
@@ -222,7 +290,11 @@ public:
|
||||
llvm::Value *All(llvm::Value *mask);
|
||||
|
||||
/** Given a boolean mask value of type LLVMTypes::MaskType, return an
|
||||
i32 value wherein the i'th bit is on if and only if the i'th lane
|
||||
i1 value that indicates if all of the mask lanes are off. */
|
||||
llvm::Value *None(llvm::Value *mask);
|
||||
|
||||
/** Given a boolean mask value of type LLVMTypes::MaskType, return an
|
||||
i64 value wherein the i'th bit is on if and only if the i'th lane
|
||||
of the mask is on. */
|
||||
llvm::Value *LaneMask(llvm::Value *mask);
|
||||
|
||||
@@ -230,6 +302,18 @@ public:
|
||||
that indicates whether the two masks are equal. */
|
||||
llvm::Value *MasksAllEqual(llvm::Value *mask1, llvm::Value *mask2);
|
||||
|
||||
/** generate constantvector, which contains programindex, i.e.
|
||||
< i32 0, i32 1, i32 2, i32 3> */
|
||||
llvm::Value *ProgramIndexVector(bool is32bits = true);
|
||||
#ifdef ISPC_NVPTX_ENABLED
|
||||
llvm::Value *ProgramIndexVectorPTX(bool is32bits = true);
|
||||
|
||||
/** Issues a call to __insert_int8/int16/int32/int64/float/double */
|
||||
llvm::Value* Insert(llvm::Value *vector, llvm::Value *lane, llvm::Value *scalar);
|
||||
/** Issues a call to __extract_int8/int16/int32/int64/float/double */
|
||||
llvm::Value* Extract(llvm::Value *vector, llvm::Value *lane);
|
||||
#endif
|
||||
|
||||
/** Given a string, create an anonymous global variable to hold its
|
||||
value and return the pointer to the string. */
|
||||
llvm::Value *GetStringPtr(const std::string &str);
|
||||
@@ -267,8 +351,13 @@ public:
|
||||
llvm::Instruction for convenience; in calling code we often have
|
||||
Instructions stored using Value pointers; the code here returns
|
||||
silently if it's not actually given an instruction. */
|
||||
void AddDebugPos(llvm::Value *instruction, const SourcePos *pos = NULL,
|
||||
void AddDebugPos(llvm::Value *instruction, const SourcePos *pos = NULL,
|
||||
#if ISPC_LLVM_VERSION <= ISPC_LLVM_3_6
|
||||
llvm::DIScope *scope = NULL);
|
||||
#else /* LLVM 3.7+ */
|
||||
llvm::DIScope *scope = NULL);
|
||||
//llvm::MDScope *scope = NULL );
|
||||
#endif
|
||||
|
||||
/** Inform the debugging information generation code that a new scope
|
||||
is starting in the source program. */
|
||||
@@ -280,7 +369,11 @@ public:
|
||||
|
||||
/** Returns the llvm::DIScope corresponding to the current program
|
||||
scope. */
|
||||
#if ISPC_LLVM_VERSION <= ISPC_LLVM_3_6
|
||||
llvm::DIScope GetDIScope() const;
|
||||
#else // LLVM 3.7++
|
||||
llvm::DIScope *GetDIScope() const;
|
||||
#endif
|
||||
|
||||
/** Emits debugging information for the variable represented by
|
||||
sym. */
|
||||
@@ -288,7 +381,7 @@ public:
|
||||
|
||||
/** Emits debugging information for the function parameter represented
|
||||
by sym. */
|
||||
void EmitFunctionParameterDebugInfo(Symbol *sym);
|
||||
void EmitFunctionParameterDebugInfo(Symbol *sym, int parameterNum);
|
||||
/** @} */
|
||||
|
||||
/** @name IR instruction emission
|
||||
@@ -296,7 +389,7 @@ public:
|
||||
instructions. See the LLVM assembly language reference manual
|
||||
(http://llvm.org/docs/LangRef.html) and the LLVM doxygen documentaion
|
||||
(http://llvm.org/doxygen) for more information. Here we will only
|
||||
document significant generalizations to the functionality of the
|
||||
document significant generalizations to the functionality of the
|
||||
corresponding basic LLVM instructions.
|
||||
|
||||
Beyond actually emitting the instruction, the implementations of
|
||||
@@ -312,7 +405,7 @@ public:
|
||||
this also handles applying the given operation to the vector
|
||||
elements. */
|
||||
llvm::Value *BinaryOperator(llvm::Instruction::BinaryOps inst,
|
||||
llvm::Value *v0, llvm::Value *v1,
|
||||
llvm::Value *v0, llvm::Value *v1,
|
||||
const char *name = NULL);
|
||||
|
||||
/** Emit the "not" operator. Like BinaryOperator(), this also handles
|
||||
@@ -322,7 +415,7 @@ public:
|
||||
/** Emit a comparison instruction. If the operands are VectorTypes,
|
||||
then a value for the corresponding boolean VectorType is
|
||||
returned. */
|
||||
llvm::Value *CmpInst(llvm::Instruction::OtherOps inst,
|
||||
llvm::Value *CmpInst(llvm::Instruction::OtherOps inst,
|
||||
llvm::CmpInst::Predicate pred,
|
||||
llvm::Value *v0, llvm::Value *v1, const char *name = NULL);
|
||||
|
||||
@@ -330,25 +423,35 @@ public:
|
||||
array, for pointer types). */
|
||||
llvm::Value *SmearUniform(llvm::Value *value, const char *name = NULL);
|
||||
|
||||
llvm::Value *BitCastInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type,
|
||||
llvm::Value *BitCastInst(llvm::Value *value, llvm::Type *type,
|
||||
const char *name = NULL);
|
||||
llvm::Value *PtrToIntInst(llvm::Value *value, const char *name = NULL);
|
||||
llvm::Value *PtrToIntInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type,
|
||||
llvm::Value *PtrToIntInst(llvm::Value *value, llvm::Type *type,
|
||||
const char *name = NULL);
|
||||
llvm::Value *IntToPtrInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type,
|
||||
llvm::Value *IntToPtrInst(llvm::Value *value, llvm::Type *type,
|
||||
const char *name = NULL);
|
||||
|
||||
llvm::Instruction *TruncInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type,
|
||||
llvm::Instruction *TruncInst(llvm::Value *value, llvm::Type *type,
|
||||
const char *name = NULL);
|
||||
llvm::Instruction *CastInst(llvm::Instruction::CastOps op, llvm::Value *value,
|
||||
LLVM_TYPE_CONST llvm::Type *type, const char *name = NULL);
|
||||
llvm::Instruction *FPCastInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type,
|
||||
llvm::Type *type, const char *name = NULL);
|
||||
llvm::Instruction *FPCastInst(llvm::Value *value, llvm::Type *type,
|
||||
const char *name = NULL);
|
||||
llvm::Instruction *SExtInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type,
|
||||
llvm::Instruction *SExtInst(llvm::Value *value, llvm::Type *type,
|
||||
const char *name = NULL);
|
||||
llvm::Instruction *ZExtInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type,
|
||||
llvm::Instruction *ZExtInst(llvm::Value *value, llvm::Type *type,
|
||||
const char *name = NULL);
|
||||
|
||||
/** Given two integer-typed values (but possibly one vector and the
|
||||
other not, and or of possibly-different bit-widths), update their
|
||||
values as needed so that the two have the same (more general)
|
||||
type. */
|
||||
void MatchIntegerTypes(llvm::Value **v0, llvm::Value **v1);
|
||||
|
||||
/** Create a new slice pointer out of the given pointer to an soa type
|
||||
and an integer offset to a slice within that type. */
|
||||
llvm::Value *MakeSlicePointer(llvm::Value *ptr, llvm::Value *offset);
|
||||
|
||||
/** These GEP methods are generalizations of the standard ones in LLVM;
|
||||
they support both uniform and varying basePtr values as well as
|
||||
uniform and varying index values (arrays of indices). Varying base
|
||||
@@ -369,7 +472,8 @@ public:
|
||||
the type of the pointer, though it may be NULL if the base pointer
|
||||
is uniform. */
|
||||
llvm::Value *AddElementOffset(llvm::Value *basePtr, int elementNum,
|
||||
const Type *ptrType, const char *name = NULL);
|
||||
const Type *ptrType, const char *name = NULL,
|
||||
const PointerType **resultPtrType = NULL);
|
||||
|
||||
/** Load from the memory location(s) given by lvalue, using the given
|
||||
mask. The lvalue may be varying, in which case this corresponds to
|
||||
@@ -377,7 +481,8 @@ public:
|
||||
pointer values given by the lvalue. If the lvalue is not varying,
|
||||
then both the mask pointer and the type pointer may be NULL. */
|
||||
llvm::Value *LoadInst(llvm::Value *ptr, llvm::Value *mask,
|
||||
const Type *ptrType, const char *name = NULL);
|
||||
const Type *ptrType, const char *name = NULL,
|
||||
bool one_elem = false);
|
||||
|
||||
llvm::Value *LoadInst(llvm::Value *ptr, const char *name = NULL);
|
||||
|
||||
@@ -386,9 +491,9 @@ public:
|
||||
allocated at the given alignment. By default, the alloca
|
||||
instruction is added at the start of the function in the entry
|
||||
basic block; if it should be added to the current basic block, then
|
||||
the atEntryBlock parameter should be false. */
|
||||
llvm::Value *AllocaInst(LLVM_TYPE_CONST llvm::Type *llvmType,
|
||||
const char *name = NULL, int align = 0,
|
||||
the atEntryBlock parameter should be false. */
|
||||
llvm::Value *AllocaInst(llvm::Type *llvmType,
|
||||
const char *name = NULL, int align = 0,
|
||||
bool atEntryBlock = true);
|
||||
|
||||
/** Standard store instruction; for this variant, the lvalue must be a
|
||||
@@ -400,7 +505,14 @@ public:
|
||||
varying, the given storeMask is used to mask the stores so that
|
||||
they only execute for the active program instances. */
|
||||
void StoreInst(llvm::Value *value, llvm::Value *ptr,
|
||||
llvm::Value *storeMask, const Type *ptrType);
|
||||
llvm::Value *storeMask, const Type *valueType,
|
||||
const Type *ptrType);
|
||||
|
||||
/** Copy count bytes of memory from the location pointed to by src to
|
||||
the location pointed to by dest. (src and dest must not be
|
||||
overlapping.) */
|
||||
void MemcpyInst(llvm::Value *dest, llvm::Value *src, llvm::Value *count,
|
||||
llvm::Value *align = NULL);
|
||||
|
||||
void BranchInst(llvm::BasicBlock *block);
|
||||
void BranchInst(llvm::BasicBlock *trueBlock, llvm::BasicBlock *falseBlock,
|
||||
@@ -414,10 +526,20 @@ public:
|
||||
/** This convenience method maps to an llvm::InsertElementInst if the
|
||||
given value is a llvm::VectorType, and to an llvm::InsertValueInst
|
||||
otherwise. */
|
||||
llvm::Value *InsertInst(llvm::Value *v, llvm::Value *eltVal, int elt,
|
||||
llvm::Value *InsertInst(llvm::Value *v, llvm::Value *eltVal, int elt,
|
||||
const char *name = NULL);
|
||||
|
||||
llvm::PHINode *PhiNode(LLVM_TYPE_CONST llvm::Type *type, int count,
|
||||
/** This convenience method maps to an llvm::ShuffleVectorInst. */
|
||||
llvm::Value *ShuffleInst(llvm::Value *v1, llvm::Value *v2, llvm::Value *mask,
|
||||
const char *name = NULL);
|
||||
|
||||
/** This convenience method to generate broadcast pattern. It takes a value
|
||||
and a vector type. Type of the value must match element type of the
|
||||
vector. */
|
||||
llvm::Value *BroadcastValue(llvm::Value *v, llvm::Type *vecType,
|
||||
const char *name = NULL);
|
||||
|
||||
llvm::PHINode *PhiNode(llvm::Type *type, int count,
|
||||
const char *name = NULL);
|
||||
llvm::Instruction *SelectInst(llvm::Value *test, llvm::Value *val0,
|
||||
llvm::Value *val1, const char *name = NULL);
|
||||
@@ -443,9 +565,9 @@ public:
|
||||
|
||||
/** Launch an asynchronous task to run the given function, passing it
|
||||
he given argument values. */
|
||||
llvm::Value *LaunchInst(llvm::Value *callee,
|
||||
llvm::Value *LaunchInst(llvm::Value *callee,
|
||||
std::vector<llvm::Value *> &argVals,
|
||||
llvm::Value *launchCount);
|
||||
llvm::Value *launchCount[3]);
|
||||
|
||||
void SyncInst();
|
||||
|
||||
@@ -488,14 +610,14 @@ private:
|
||||
for error messages and debugging symbols. */
|
||||
SourcePos funcStartPos;
|
||||
|
||||
/** If currently in a loop body, the value of the mask at the start of
|
||||
the loop. */
|
||||
llvm::Value *loopMask;
|
||||
/** If currently in a loop body or switch statement, the value of the
|
||||
mask at the start of it. */
|
||||
llvm::Value *blockEntryMask;
|
||||
|
||||
/** If currently in a loop body, this is a pointer to memory to store a
|
||||
mask value that represents which of the lanes have executed a
|
||||
'break' statement. If we're not in a loop body, this should be
|
||||
NULL. */
|
||||
/** If currently in a loop body or switch statement, this is a pointer
|
||||
to memory to store a mask value that represents which of the lanes
|
||||
have executed a 'break' statement. If we're not in a loop body or
|
||||
switch, this should be NULL. */
|
||||
llvm::Value *breakLanesPtr;
|
||||
|
||||
/** Similar to breakLanesPtr, if we're inside a loop, this is a pointer
|
||||
@@ -503,16 +625,49 @@ private:
|
||||
'continue' statement. */
|
||||
llvm::Value *continueLanesPtr;
|
||||
|
||||
/** If we're inside a loop, this gives the basic block immediately
|
||||
after the current loop, which we will jump to if all of the lanes
|
||||
have executed a break statement or are otherwise done with the
|
||||
loop. */
|
||||
/** If we're inside a loop or switch statement, this gives the basic
|
||||
block immediately after the current loop or switch, which we will
|
||||
jump to if all of the lanes have executed a break statement or are
|
||||
otherwise done with it. */
|
||||
llvm::BasicBlock *breakTarget;
|
||||
|
||||
/** If we're inside a loop, this gives the block to jump to if all of
|
||||
the running lanes have executed a 'continue' statement. */
|
||||
llvm::BasicBlock *continueTarget;
|
||||
|
||||
/** @name Switch statement state
|
||||
|
||||
These variables store various state that's active when we're
|
||||
generating code for a switch statement. They should all be NULL
|
||||
outside of a switch.
|
||||
@{
|
||||
*/
|
||||
|
||||
/** The value of the expression used to determine which case in the
|
||||
statements after the switch to execute. */
|
||||
llvm::Value *switchExpr;
|
||||
|
||||
/** Map from case label numbers to the basic block that will hold code
|
||||
for that case. */
|
||||
const std::vector<std::pair<int, llvm::BasicBlock *> > *caseBlocks;
|
||||
|
||||
/** The basic block of code to run for the "default" label in the
|
||||
switch statement. */
|
||||
llvm::BasicBlock *defaultBlock;
|
||||
|
||||
/** For each basic block for the code for cases (and the default label,
|
||||
if present), this map gives the basic block for the immediately
|
||||
following case/default label. */
|
||||
const std::map<llvm::BasicBlock *, llvm::BasicBlock *> *nextBlocks;
|
||||
|
||||
/** Records whether the switch condition was uniform; this is a
|
||||
distinct notion from whether the switch represents uniform or
|
||||
varying control flow; we may have varying control flow from a
|
||||
uniform switch condition if there is a 'break' inside the switch
|
||||
that's under varying control flow. */
|
||||
bool switchConditionWasUniform;
|
||||
/** @} */
|
||||
|
||||
/** A pointer to memory that records which of the program instances
|
||||
have executed a 'return' statement (and are thus really truly done
|
||||
running any more instructions in this functions. */
|
||||
@@ -530,17 +685,31 @@ private:
|
||||
emitted. */
|
||||
std::vector<CFInfo *> controlFlowInfo;
|
||||
|
||||
#if ISPC_LLVM_VERSION <= ISPC_LLVM_3_6
|
||||
/** DIFile object corresponding to the source file where the current
|
||||
function was defined (used for debugging info0. */
|
||||
function was defined (used for debugging info). */
|
||||
llvm::DIFile diFile;
|
||||
|
||||
/** DISubprogram corresponding to this function (used for debugging
|
||||
info). */
|
||||
llvm::DISubprogram diFunction;
|
||||
llvm::DISubprogram diSubprogram;
|
||||
|
||||
/** These correspond to the current set of nested scopes in the
|
||||
function. */
|
||||
std::vector<llvm::DILexicalBlock> debugScopes;
|
||||
#else // LLVM 3.7++
|
||||
/** DIFile object corresponding to the source file where the current
|
||||
function was defined (used for debugging info). */
|
||||
llvm::DIFile *diFile;
|
||||
|
||||
/** DISubprogram corresponding to this function (used for debugging
|
||||
info). */
|
||||
llvm::DISubprogram *diSubprogram;
|
||||
|
||||
/** These correspond to the current set of nested scopes in the
|
||||
function. */
|
||||
std::vector<llvm::DIScope *> debugScopes;
|
||||
#endif
|
||||
|
||||
/** True if a 'launch' statement has been encountered in the function. */
|
||||
bool launchedTasks;
|
||||
@@ -550,27 +719,43 @@ private:
|
||||
tasks launched from the current function. */
|
||||
llvm::Value *launchGroupHandlePtr;
|
||||
|
||||
/** Nesting count of the number of times calling code has disabled (and
|
||||
not yet reenabled) gather/scatter performance warnings. */
|
||||
int disableGSWarningCount;
|
||||
|
||||
std::map<std::string, llvm::BasicBlock *> labelMap;
|
||||
|
||||
static bool initLabelBBlocks(ASTNode *node, void *data);
|
||||
|
||||
llvm::Value *pointerVectorToVoidPointers(llvm::Value *value);
|
||||
static void addGSMetadata(llvm::Value *inst, SourcePos pos);
|
||||
bool ifsInLoopAllUniform() const;
|
||||
bool ifsInCFAllUniform(int cfType) const;
|
||||
void jumpIfAllLoopLanesAreDone(llvm::BasicBlock *target);
|
||||
llvm::Value *emitGatherCallback(llvm::Value *lvalue, llvm::Value *retPtr);
|
||||
|
||||
llvm::Value *applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index,
|
||||
llvm::Value *applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index,
|
||||
const Type *ptrType);
|
||||
|
||||
void restoreMaskGivenReturns(llvm::Value *oldMask);
|
||||
void addSwitchMaskCheck(llvm::Value *mask);
|
||||
bool inSwitchStatement() const;
|
||||
llvm::Value *getMaskAtSwitchEntry();
|
||||
|
||||
void scatter(llvm::Value *value, llvm::Value *ptr, const Type *ptrType,
|
||||
llvm::Value *mask);
|
||||
CFInfo *popCFState();
|
||||
|
||||
void scatter(llvm::Value *value, llvm::Value *ptr, const Type *valueType,
|
||||
const Type *ptrType, llvm::Value *mask);
|
||||
void maskedStore(llvm::Value *value, llvm::Value *ptr, const Type *ptrType,
|
||||
llvm::Value *mask);
|
||||
llvm::Value *gather(llvm::Value *ptr, const Type *ptrType, llvm::Value *mask,
|
||||
const char *name);
|
||||
void storeUniformToSOA(llvm::Value *value, llvm::Value *ptr,
|
||||
llvm::Value *mask, const Type *valueType,
|
||||
const PointerType *ptrType);
|
||||
llvm::Value *loadUniformFromSOA(llvm::Value *ptr, llvm::Value *mask,
|
||||
const PointerType *ptrType, const char *name);
|
||||
|
||||
llvm::Value *gather(llvm::Value *ptr, const PointerType *ptrType,
|
||||
llvm::Value *mask, const char *name);
|
||||
|
||||
llvm::Value *addVaryingOffsetsIfNeeded(llvm::Value *ptr, const Type *ptrType);
|
||||
};
|
||||
|
||||
|
||||
638
decl.cpp
638
decl.cpp
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2010-2011, Intel Corporation
|
||||
Copyright (c) 2010-2013, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -28,12 +28,12 @@
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/** @file decl.cpp
|
||||
@brief Implementations of classes related to turning declarations into
|
||||
symbols and types.
|
||||
@brief Implementations of classes related to turning declarations into
|
||||
symbol names and types.
|
||||
*/
|
||||
|
||||
#include "decl.h"
|
||||
@@ -44,6 +44,7 @@
|
||||
#include "stmt.h"
|
||||
#include "expr.h"
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <set>
|
||||
|
||||
static void
|
||||
@@ -55,26 +56,45 @@ lPrintTypeQualifiers(int typeQualifiers) {
|
||||
if (typeQualifiers & TYPEQUAL_TASK) printf("task ");
|
||||
if (typeQualifiers & TYPEQUAL_SIGNED) printf("signed ");
|
||||
if (typeQualifiers & TYPEQUAL_UNSIGNED) printf("unsigned ");
|
||||
if (typeQualifiers & TYPEQUAL_EXPORT) printf("export ");
|
||||
if (typeQualifiers & TYPEQUAL_UNMASKED) printf("unmasked ");
|
||||
}
|
||||
|
||||
|
||||
/** Given a Type and a set of type qualifiers, apply the type qualifiers to
|
||||
the type, returning the type that is the result.
|
||||
the type, returning the type that is the result.
|
||||
*/
|
||||
static const Type *
|
||||
lApplyTypeQualifiers(int typeQualifiers, const Type *type, SourcePos pos) {
|
||||
if (type == NULL)
|
||||
return NULL;
|
||||
|
||||
if ((typeQualifiers & TYPEQUAL_CONST) != 0)
|
||||
if ((typeQualifiers & TYPEQUAL_CONST) != 0) {
|
||||
type = type->GetAsConstType();
|
||||
}
|
||||
|
||||
if ((typeQualifiers & TYPEQUAL_UNIFORM) != 0)
|
||||
type = type->GetAsUniformType();
|
||||
else if ((typeQualifiers & TYPEQUAL_VARYING) != 0)
|
||||
type = type->GetAsVaryingType();
|
||||
else
|
||||
type = type->GetAsUnboundVariabilityType();
|
||||
if ( ((typeQualifiers & TYPEQUAL_UNIFORM) != 0)
|
||||
&& ((typeQualifiers & TYPEQUAL_VARYING) != 0) ) {
|
||||
Error(pos, "Type \"%s\" cannot be qualified with both uniform and varying.",
|
||||
type->GetString().c_str());
|
||||
}
|
||||
|
||||
if ((typeQualifiers & TYPEQUAL_UNIFORM) != 0) {
|
||||
if (type->IsVoidType())
|
||||
Error(pos, "\"uniform\" qualifier is illegal with \"void\" type.");
|
||||
else
|
||||
type = type->GetAsUniformType();
|
||||
}
|
||||
else if ((typeQualifiers & TYPEQUAL_VARYING) != 0) {
|
||||
if (type->IsVoidType())
|
||||
Error(pos, "\"varying\" qualifier is illegal with \"void\" type.");
|
||||
else
|
||||
type = type->GetAsVaryingType();
|
||||
}
|
||||
else {
|
||||
if (type->IsVoidType() == false)
|
||||
type = type->GetAsUnboundVariabilityType();
|
||||
}
|
||||
|
||||
if ((typeQualifiers & TYPEQUAL_UNSIGNED) != 0) {
|
||||
if ((typeQualifiers & TYPEQUAL_SIGNED) != 0)
|
||||
@@ -84,15 +104,20 @@ lApplyTypeQualifiers(int typeQualifiers, const Type *type, SourcePos pos) {
|
||||
const Type *unsignedType = type->GetAsUnsignedType();
|
||||
if (unsignedType != NULL)
|
||||
type = unsignedType;
|
||||
else
|
||||
else {
|
||||
const Type *resolvedType =
|
||||
type->ResolveUnboundVariability(Variability::Varying);
|
||||
Error(pos, "\"unsigned\" qualifier is illegal with \"%s\" type.",
|
||||
type->ResolveUnboundVariability(Type::Varying)->GetString().c_str());
|
||||
resolvedType->GetString().c_str());
|
||||
}
|
||||
}
|
||||
|
||||
if ((typeQualifiers & TYPEQUAL_SIGNED) != 0 && type->IsIntType() == false)
|
||||
if ((typeQualifiers & TYPEQUAL_SIGNED) != 0 && type->IsIntType() == false) {
|
||||
const Type *resolvedType =
|
||||
type->ResolveUnboundVariability(Variability::Varying);
|
||||
Error(pos, "\"signed\" qualifier is illegal with non-integer type "
|
||||
"\"%s\".",
|
||||
type->ResolveUnboundVariability(Type::Varying)->GetString().c_str());
|
||||
"\"%s\".", resolvedType->GetString().c_str());
|
||||
}
|
||||
|
||||
return type;
|
||||
}
|
||||
@@ -107,23 +132,84 @@ DeclSpecs::DeclSpecs(const Type *t, StorageClass sc, int tq) {
|
||||
typeQualifiers = tq;
|
||||
soaWidth = 0;
|
||||
vectorSize = 0;
|
||||
if (t != NULL) {
|
||||
if (m->symbolTable->ContainsType(t)) {
|
||||
// Typedefs might have uniform/varying qualifiers inside.
|
||||
if (t->IsVaryingType()) {
|
||||
typeQualifiers |= TYPEQUAL_VARYING;
|
||||
}
|
||||
else if (t->IsUniformType()) {
|
||||
typeQualifiers |= TYPEQUAL_UNIFORM;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
const Type *
|
||||
DeclSpecs::GetBaseType(SourcePos pos) const {
|
||||
const Type *bt = baseType;
|
||||
const Type *retType = baseType;
|
||||
|
||||
if (retType == NULL) {
|
||||
Warning(pos, "No type specified in declaration. Assuming int32.");
|
||||
retType = AtomicType::UniformInt32->GetAsUnboundVariabilityType();
|
||||
}
|
||||
|
||||
if (vectorSize > 0) {
|
||||
const AtomicType *atomicType = dynamic_cast<const AtomicType *>(bt);
|
||||
const AtomicType *atomicType = CastType<AtomicType>(retType);
|
||||
if (atomicType == NULL) {
|
||||
Error(pos, "Only atomic types (int, float, ...) are legal for vector "
|
||||
"types.");
|
||||
return NULL;
|
||||
}
|
||||
bt = new VectorType(atomicType, vectorSize);
|
||||
retType = new VectorType(atomicType, vectorSize);
|
||||
}
|
||||
|
||||
return lApplyTypeQualifiers(typeQualifiers, bt, pos);
|
||||
retType = lApplyTypeQualifiers(typeQualifiers, retType, pos);
|
||||
|
||||
if (soaWidth > 0) {
|
||||
#ifdef ISPC_NVPTX_ENABLED
|
||||
#if 0 /* see stmt.cpp in DeclStmt::EmitCode for work-around of SOAType Declaration */
|
||||
if (g->target->getISA() == Target::NVPTX)
|
||||
{
|
||||
Error(pos, "\"soa\" data types are currently not supported with \"nvptx\" target.");
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
#endif /* ISPC_NVPTX_ENABLED */
|
||||
const StructType *st = CastType<StructType>(retType);
|
||||
|
||||
if (st == NULL) {
|
||||
Error(pos, "Illegal to provide soa<%d> qualifier with non-struct "
|
||||
"type \"%s\".", soaWidth, retType->GetString().c_str());
|
||||
return NULL;
|
||||
}
|
||||
else if (soaWidth <= 0 || (soaWidth & (soaWidth - 1)) != 0) {
|
||||
Error(pos, "soa<%d> width illegal. Value must be positive power "
|
||||
"of two.", soaWidth);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (st->IsUniformType()) {
|
||||
Error(pos, "\"uniform\" qualifier and \"soa<%d>\" qualifier can't "
|
||||
"both be used in a type declaration.", soaWidth);
|
||||
return NULL;
|
||||
}
|
||||
else if (st->IsVaryingType()) {
|
||||
Error(pos, "\"varying\" qualifier and \"soa<%d>\" qualifier can't "
|
||||
"both be used in a type declaration.", soaWidth);
|
||||
return NULL;
|
||||
}
|
||||
else
|
||||
retType = st->GetAsSOAType(soaWidth);
|
||||
|
||||
if (soaWidth < g->target->getVectorWidth())
|
||||
PerformanceWarning(pos, "soa<%d> width smaller than gang size %d "
|
||||
"currently leads to inefficient code to access "
|
||||
"soa types.", soaWidth, g->target->getVectorWidth());
|
||||
}
|
||||
|
||||
return retType;
|
||||
}
|
||||
|
||||
|
||||
@@ -133,7 +219,6 @@ lGetStorageClassName(StorageClass storageClass) {
|
||||
case SC_NONE: return "";
|
||||
case SC_EXTERN: return "extern";
|
||||
case SC_EXTERN_C: return "extern \"C\"";
|
||||
case SC_EXPORT: return "export";
|
||||
case SC_STATIC: return "static";
|
||||
case SC_TYPEDEF: return "typedef";
|
||||
default: FATAL("Unhandled storage class in lGetStorageClassName");
|
||||
@@ -158,35 +243,35 @@ DeclSpecs::Print() const {
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Declarator
|
||||
|
||||
Declarator::Declarator(DeclaratorKind dk, SourcePos p)
|
||||
: pos(p), kind(dk) {
|
||||
Declarator::Declarator(DeclaratorKind dk, SourcePos p)
|
||||
: pos(p), kind(dk) {
|
||||
child = NULL;
|
||||
typeQualifiers = 0;
|
||||
storageClass = SC_NONE;
|
||||
arraySize = -1;
|
||||
sym = NULL;
|
||||
type = NULL;
|
||||
initExpr = NULL;
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
Declarator::InitFromDeclSpecs(DeclSpecs *ds) {
|
||||
const Type *t = GetType(ds);
|
||||
Symbol *sym = GetSymbol();
|
||||
if (sym != NULL) {
|
||||
sym->type = t;
|
||||
sym->storageClass = ds->storageClass;
|
||||
const Type *baseType = ds->GetBaseType(pos);
|
||||
|
||||
InitFromType(baseType, ds);
|
||||
|
||||
if (type == NULL) {
|
||||
AssertPos(pos, m->errorCount > 0);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
storageClass = ds->storageClass;
|
||||
|
||||
Symbol *
|
||||
Declarator::GetSymbol() const {
|
||||
// The symbol lives at the last child in the chain, so walk down there
|
||||
// and return the one there.
|
||||
const Declarator *d = this;
|
||||
while (d->child != NULL)
|
||||
d = d->child;
|
||||
return d->sym;
|
||||
if (ds->declSpecList.size() > 0 &&
|
||||
CastType<FunctionType>(type) == NULL) {
|
||||
Error(pos, "__declspec specifiers for non-function type \"%s\" are "
|
||||
"not used.", type->GetString().c_str());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -196,11 +281,11 @@ Declarator::Print(int indent) const {
|
||||
pos.Print();
|
||||
|
||||
lPrintTypeQualifiers(typeQualifiers);
|
||||
Symbol *sym = GetSymbol();
|
||||
if (sym != NULL)
|
||||
printf("%s", sym->name.c_str());
|
||||
printf("%s ", lGetStorageClassName(storageClass));
|
||||
if (name.size() > 0)
|
||||
printf("%s", name.c_str());
|
||||
else
|
||||
printf("(null symbol)");
|
||||
printf("(unnamed)");
|
||||
|
||||
printf(", array size = %d", arraySize);
|
||||
|
||||
@@ -234,115 +319,121 @@ Declarator::Print(int indent) const {
|
||||
}
|
||||
|
||||
|
||||
Symbol *
|
||||
Declarator::GetFunctionInfo(DeclSpecs *ds, std::vector<Symbol *> *funArgs) {
|
||||
const FunctionType *type =
|
||||
dynamic_cast<const FunctionType *>(GetType(ds));
|
||||
if (type == NULL)
|
||||
return NULL;
|
||||
|
||||
Symbol *declSym = GetSymbol();
|
||||
Assert(declSym != NULL);
|
||||
|
||||
// Get the symbol for the function from the symbol table. (It should
|
||||
// already have been added to the symbol table by AddGlobal() by the
|
||||
// time we get here.)
|
||||
Symbol *funSym = m->symbolTable->LookupFunction(declSym->name.c_str(), type);
|
||||
if (funSym != NULL)
|
||||
// May be NULL due to error earlier in compilation
|
||||
funSym->pos = pos;
|
||||
|
||||
// Walk down to the declarator for the function. (We have to get past
|
||||
// the stuff that specifies the function's return type before we get to
|
||||
// the function's declarator.)
|
||||
Declarator *d = this;
|
||||
while (d != NULL && d->kind != DK_FUNCTION)
|
||||
d = d->child;
|
||||
Assert(d != NULL);
|
||||
|
||||
for (unsigned int i = 0; i < d->functionParams.size(); ++i) {
|
||||
Symbol *sym = d->GetSymbolForFunctionParameter(i);
|
||||
sym->type = sym->type->ResolveUnboundVariability(Type::Varying);
|
||||
funArgs->push_back(sym);
|
||||
}
|
||||
|
||||
funSym->type = funSym->type->ResolveUnboundVariability(Type::Varying);
|
||||
|
||||
return funSym;
|
||||
}
|
||||
|
||||
|
||||
const Type *
|
||||
Declarator::GetType(const Type *base, DeclSpecs *ds) const {
|
||||
void
|
||||
Declarator::InitFromType(const Type *baseType, DeclSpecs *ds) {
|
||||
bool hasUniformQual = ((typeQualifiers & TYPEQUAL_UNIFORM) != 0);
|
||||
bool hasVaryingQual = ((typeQualifiers & TYPEQUAL_VARYING) != 0);
|
||||
bool isTask = ((typeQualifiers & TYPEQUAL_TASK) != 0);
|
||||
bool isExported = ((typeQualifiers & TYPEQUAL_EXPORT) != 0);
|
||||
bool isConst = ((typeQualifiers & TYPEQUAL_CONST) != 0);
|
||||
bool isUnmasked = ((typeQualifiers & TYPEQUAL_UNMASKED) != 0);
|
||||
|
||||
if (hasUniformQual && hasVaryingQual) {
|
||||
Error(pos, "Can't provide both \"uniform\" and \"varying\" qualifiers.");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
if (kind != DK_FUNCTION && isTask)
|
||||
if (kind != DK_FUNCTION && isTask) {
|
||||
Error(pos, "\"task\" qualifier illegal in variable declaration.");
|
||||
return;
|
||||
}
|
||||
if (kind != DK_FUNCTION && isUnmasked) {
|
||||
Error(pos, "\"unmasked\" qualifier illegal in variable declaration.");
|
||||
return;
|
||||
}
|
||||
if (kind != DK_FUNCTION && isExported) {
|
||||
Error(pos, "\"export\" qualifier illegal in variable declaration.");
|
||||
return;
|
||||
}
|
||||
|
||||
Type::Variability variability = Type::Unbound;
|
||||
Variability variability(Variability::Unbound);
|
||||
if (hasUniformQual)
|
||||
variability = Type::Uniform;
|
||||
variability = Variability::Uniform;
|
||||
else if (hasVaryingQual)
|
||||
variability = Type::Varying;
|
||||
variability = Variability::Varying;
|
||||
|
||||
const Type *type = base;
|
||||
switch (kind) {
|
||||
case DK_BASE:
|
||||
if (kind == DK_BASE) {
|
||||
// All of the type qualifiers should be in the DeclSpecs for the
|
||||
// base declarator
|
||||
Assert(typeQualifiers == 0);
|
||||
Assert(child == NULL);
|
||||
return type;
|
||||
|
||||
case DK_POINTER:
|
||||
type = new PointerType(type, variability, isConst);
|
||||
if (child != NULL)
|
||||
return child->GetType(type, ds);
|
||||
AssertPos(pos, typeQualifiers == 0);
|
||||
AssertPos(pos, child == NULL);
|
||||
type = baseType;
|
||||
}
|
||||
else if (kind == DK_POINTER) {
|
||||
/* For now, any pointer to an SOA type gets the slice property; if
|
||||
we add the capability to declare pointers as slices or not,
|
||||
we'll want to set this based on a type qualifier here. */
|
||||
const Type *ptrType = new PointerType(baseType, variability, isConst,
|
||||
baseType->IsSOAType());
|
||||
if (child != NULL) {
|
||||
child->InitFromType(ptrType, ds);
|
||||
type = child->type;
|
||||
name = child->name;
|
||||
}
|
||||
else
|
||||
return type;
|
||||
break;
|
||||
|
||||
case DK_REFERENCE:
|
||||
if (hasUniformQual)
|
||||
type = ptrType;
|
||||
}
|
||||
else if (kind == DK_REFERENCE) {
|
||||
if (hasUniformQual) {
|
||||
Error(pos, "\"uniform\" qualifier is illegal to apply to references.");
|
||||
if (hasVaryingQual)
|
||||
return;
|
||||
}
|
||||
if (hasVaryingQual) {
|
||||
Error(pos, "\"varying\" qualifier is illegal to apply to references.");
|
||||
if (isConst)
|
||||
return;
|
||||
}
|
||||
if (isConst) {
|
||||
Error(pos, "\"const\" qualifier is to illegal apply to references.");
|
||||
|
||||
return;
|
||||
}
|
||||
// The parser should disallow this already, but double check.
|
||||
if (dynamic_cast<const ReferenceType *>(type) != NULL) {
|
||||
if (CastType<ReferenceType>(baseType) != NULL) {
|
||||
Error(pos, "References to references are illegal.");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
type = new ReferenceType(type);
|
||||
if (child != NULL)
|
||||
return child->GetType(type, ds);
|
||||
const Type *refType = new ReferenceType(baseType);
|
||||
if (child != NULL) {
|
||||
child->InitFromType(refType, ds);
|
||||
type = child->type;
|
||||
name = child->name;
|
||||
}
|
||||
else
|
||||
return type;
|
||||
break;
|
||||
type = refType;
|
||||
}
|
||||
else if (kind == DK_ARRAY) {
|
||||
if (baseType->IsVoidType()) {
|
||||
Error(pos, "Arrays of \"void\" type are illegal.");
|
||||
return;
|
||||
}
|
||||
if (CastType<ReferenceType>(baseType)) {
|
||||
Error(pos, "Arrays of references (type \"%s\") are illegal.",
|
||||
baseType->GetString().c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
case DK_ARRAY:
|
||||
type = new ArrayType(type, arraySize);
|
||||
if (child)
|
||||
return child->GetType(type, ds);
|
||||
#ifdef ISPC_NVPTX_ENABLED
|
||||
#if 0 /* NVPTX */
|
||||
if (baseType->IsUniformType())
|
||||
{
|
||||
fprintf(stderr, " detected uniform array of size= %d array= %s\n" ,arraySize,
|
||||
baseType->IsArrayType() ? " true " : " false ");
|
||||
}
|
||||
#endif
|
||||
#endif /* ISPC_NVPTX_ENABLED */
|
||||
const Type *arrayType = new ArrayType(baseType, arraySize);
|
||||
if (child != NULL) {
|
||||
child->InitFromType(arrayType, ds);
|
||||
type = child->type;
|
||||
name = child->name;
|
||||
}
|
||||
else
|
||||
return type;
|
||||
break;
|
||||
|
||||
case DK_FUNCTION: {
|
||||
std::vector<const Type *> args;
|
||||
std::vector<std::string> argNames;
|
||||
std::vector<ConstExpr *> argDefaults;
|
||||
std::vector<SourcePos> argPos;
|
||||
type = arrayType;
|
||||
}
|
||||
else if (kind == DK_FUNCTION) {
|
||||
llvm::SmallVector<const Type *, 8> args;
|
||||
llvm::SmallVector<std::string, 8> argNames;
|
||||
llvm::SmallVector<Expr *, 8> argDefaults;
|
||||
llvm::SmallVector<SourcePos, 8> argPos;
|
||||
|
||||
// Loop over the function arguments and store the names, types,
|
||||
// default values (if any), and source file positions each one in
|
||||
@@ -350,15 +441,44 @@ Declarator::GetType(const Type *base, DeclSpecs *ds) const {
|
||||
for (unsigned int i = 0; i < functionParams.size(); ++i) {
|
||||
Declaration *d = functionParams[i];
|
||||
|
||||
Symbol *sym = GetSymbolForFunctionParameter(i);
|
||||
if (d == NULL) {
|
||||
AssertPos(pos, m->errorCount > 0);
|
||||
continue;
|
||||
}
|
||||
if (d->declarators.size() == 0) {
|
||||
// function declaration like foo(float), w/o a name for the
|
||||
// parameter; wire up a placeholder Declarator for it
|
||||
d->declarators.push_back(new Declarator(DK_BASE, pos));
|
||||
d->declarators[0]->InitFromDeclSpecs(d->declSpecs);
|
||||
}
|
||||
|
||||
AssertPos(pos, d->declarators.size() == 1);
|
||||
Declarator *decl = d->declarators[0];
|
||||
if (decl == NULL || decl->type == NULL) {
|
||||
AssertPos(pos, m->errorCount > 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (decl->name == "") {
|
||||
// Give a name to any anonymous parameter declarations
|
||||
char buf[32];
|
||||
sprintf(buf, "__anon_parameter_%d", i);
|
||||
decl->name = buf;
|
||||
}
|
||||
decl->type = decl->type->ResolveUnboundVariability(Variability::Varying);
|
||||
|
||||
if (d->declSpecs->storageClass != SC_NONE)
|
||||
Error(sym->pos, "Storage class \"%s\" is illegal in "
|
||||
"function parameter declaration for parameter \"%s\".",
|
||||
Error(decl->pos, "Storage class \"%s\" is illegal in "
|
||||
"function parameter declaration for parameter \"%s\".",
|
||||
lGetStorageClassName(d->declSpecs->storageClass),
|
||||
sym->name.c_str());
|
||||
decl->name.c_str());
|
||||
if (decl->type->IsVoidType()) {
|
||||
Error(decl->pos, "Parameter with type \"void\" illegal in function "
|
||||
"parameter list.");
|
||||
decl->type = NULL;
|
||||
}
|
||||
|
||||
const ArrayType *at = dynamic_cast<const ArrayType *>(sym->type);
|
||||
const ArrayType *at = CastType<ArrayType>(decl->type);
|
||||
if (at != NULL) {
|
||||
// As in C, arrays are passed to functions as pointers to
|
||||
// their element type. We'll just immediately make this
|
||||
@@ -368,144 +488,124 @@ Declarator::GetType(const Type *base, DeclSpecs *ds) const {
|
||||
// report this differently than it was originally declared
|
||||
// in the function, but it's not clear that this is a
|
||||
// significant problem.)
|
||||
sym->type = PointerType::GetUniform(at->GetElementType());
|
||||
const Type *targetType = at->GetElementType();
|
||||
if (targetType == NULL) {
|
||||
AssertPos(pos, m->errorCount > 0);
|
||||
return;
|
||||
}
|
||||
|
||||
decl->type = PointerType::GetUniform(targetType, at->IsSOAType());
|
||||
|
||||
// Make sure there are no unsized arrays (other than the
|
||||
// first dimension) in function parameter lists.
|
||||
at = dynamic_cast<const ArrayType *>(at->GetElementType());
|
||||
at = CastType<ArrayType>(targetType);
|
||||
while (at != NULL) {
|
||||
if (at->GetElementCount() == 0)
|
||||
Error(sym->pos, "Arrays with unsized dimensions in "
|
||||
Error(decl->pos, "Arrays with unsized dimensions in "
|
||||
"dimensions after the first one are illegal in "
|
||||
"function parameter lists.");
|
||||
at = dynamic_cast<const ArrayType *>(at->GetElementType());
|
||||
at = CastType<ArrayType>(at->GetElementType());
|
||||
}
|
||||
}
|
||||
|
||||
args.push_back(sym->type);
|
||||
argNames.push_back(sym->name);
|
||||
argPos.push_back(sym->pos);
|
||||
args.push_back(decl->type);
|
||||
argNames.push_back(decl->name);
|
||||
argPos.push_back(decl->pos);
|
||||
|
||||
ConstExpr *init = NULL;
|
||||
if (d->declarators.size()) {
|
||||
// Try to find an initializer expression; if there is one,
|
||||
// it lives down to the base declarator.
|
||||
Declarator *decl = d->declarators[0];
|
||||
while (decl->child != NULL) {
|
||||
Assert(decl->initExpr == NULL);
|
||||
Expr *init = NULL;
|
||||
// Try to find an initializer expression.
|
||||
while (decl != NULL) {
|
||||
if (decl->initExpr != NULL) {
|
||||
decl->initExpr = TypeCheck(decl->initExpr);
|
||||
decl->initExpr = Optimize(decl->initExpr);
|
||||
if (decl->initExpr != NULL) {
|
||||
init = llvm::dyn_cast<ConstExpr>(decl->initExpr);
|
||||
if (init == NULL)
|
||||
init = llvm::dyn_cast<NullPointerExpr>(decl->initExpr);
|
||||
if (init == NULL)
|
||||
Error(decl->initExpr->pos, "Default value for parameter "
|
||||
"\"%s\" must be a compile-time constant.",
|
||||
decl->name.c_str());
|
||||
}
|
||||
break;
|
||||
}
|
||||
else
|
||||
decl = decl->child;
|
||||
}
|
||||
|
||||
if (decl->initExpr != NULL &&
|
||||
(decl->initExpr = TypeCheck(decl->initExpr)) != NULL &&
|
||||
(decl->initExpr = Optimize(decl->initExpr)) != NULL &&
|
||||
(init = dynamic_cast<ConstExpr *>(decl->initExpr)) == NULL) {
|
||||
Error(decl->initExpr->pos, "Default value for parameter "
|
||||
"\"%s\" must be a compile-time constant.",
|
||||
sym->name.c_str());
|
||||
}
|
||||
}
|
||||
argDefaults.push_back(init);
|
||||
}
|
||||
|
||||
const Type *returnType = type;
|
||||
const Type *returnType = baseType;
|
||||
if (returnType == NULL) {
|
||||
Error(pos, "No return type provided in function declaration.");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
bool isExported = ds && (ds->storageClass == SC_EXPORT);
|
||||
|
||||
if (CastType<FunctionType>(returnType) != NULL) {
|
||||
Error(pos, "Illegal to return function type from function.");
|
||||
return;
|
||||
}
|
||||
|
||||
returnType = returnType->ResolveUnboundVariability(Variability::Varying);
|
||||
|
||||
bool isExternC = ds && (ds->storageClass == SC_EXTERN_C);
|
||||
bool isExported = ds && ((ds->typeQualifiers & TYPEQUAL_EXPORT) != 0);
|
||||
bool isTask = ds && ((ds->typeQualifiers & TYPEQUAL_TASK) != 0);
|
||||
bool isUnmasked = ds && ((ds->typeQualifiers & TYPEQUAL_UNMASKED) != 0);
|
||||
|
||||
if (isExported && isTask) {
|
||||
Error(pos, "Function can't have both \"task\" and \"export\" "
|
||||
"qualifiers");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
if (isExternC && isTask) {
|
||||
Error(pos, "Function can't have both \"extern \"C\"\" and \"task\" "
|
||||
"qualifiers");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
if (isExternC && isExported) {
|
||||
Error(pos, "Function can't have both \"extern \"C\"\" and \"export\" "
|
||||
"qualifiers");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
if (isUnmasked && isExported)
|
||||
Warning(pos, "\"unmasked\" qualifier is redundant for exported "
|
||||
"functions.");
|
||||
|
||||
if (child == NULL) {
|
||||
AssertPos(pos, m->errorCount > 0);
|
||||
return;
|
||||
}
|
||||
|
||||
const Type *functionType =
|
||||
const FunctionType *functionType =
|
||||
new FunctionType(returnType, args, argNames, argDefaults,
|
||||
argPos, isTask, isExported, isExternC);
|
||||
functionType = functionType->ResolveUnboundVariability(Type::Varying);
|
||||
return child->GetType(functionType, ds);
|
||||
}
|
||||
default:
|
||||
FATAL("Unexpected decl kind");
|
||||
return NULL;
|
||||
}
|
||||
argPos, isTask, isExported, isExternC, isUnmasked);
|
||||
|
||||
#if 0
|
||||
// Make sure we actually have an array of structs ..
|
||||
const StructType *childStructType =
|
||||
dynamic_cast<const StructType *>(childType);
|
||||
if (childStructType == NULL) {
|
||||
Error(pos, "Illegal to provide soa<%d> qualifier with non-struct "
|
||||
"type \"%s\".", soaWidth, childType->GetString().c_str());
|
||||
return new ArrayType(childType, arraySize == -1 ? 0 : arraySize);
|
||||
// handle any explicit __declspecs on the function
|
||||
if (ds != NULL) {
|
||||
for (int i = 0; i < (int)ds->declSpecList.size(); ++i) {
|
||||
std::string str = ds->declSpecList[i].first;
|
||||
SourcePos pos = ds->declSpecList[i].second;
|
||||
|
||||
if (str == "safe")
|
||||
(const_cast<FunctionType *>(functionType))->isSafe = true;
|
||||
else if (!strncmp(str.c_str(), "cost", 4)) {
|
||||
int cost = atoi(str.c_str() + 4);
|
||||
if (cost < 0)
|
||||
Error(pos, "Negative function cost %d is illegal.",
|
||||
cost);
|
||||
(const_cast<FunctionType *>(functionType))->costOverride = cost;
|
||||
}
|
||||
else
|
||||
Error(pos, "__declspec parameter \"%s\" unknown.", str.c_str());
|
||||
}
|
||||
else if ((soaWidth & (soaWidth - 1)) != 0) {
|
||||
Error(pos, "soa<%d> width illegal. Value must be power of two.",
|
||||
soaWidth);
|
||||
return NULL;
|
||||
}
|
||||
else if (arraySize != -1 && (arraySize % soaWidth) != 0) {
|
||||
Error(pos, "soa<%d> width must evenly divide array size %d.",
|
||||
soaWidth, arraySize);
|
||||
return NULL;
|
||||
}
|
||||
return new SOAArrayType(childStructType, arraySize == -1 ? 0 : arraySize,
|
||||
soaWidth);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
const Type *
|
||||
Declarator::GetType(DeclSpecs *ds) const {
|
||||
const Type *baseType = ds->GetBaseType(pos);
|
||||
const Type *type = GetType(baseType, ds);
|
||||
return type;
|
||||
}
|
||||
|
||||
|
||||
Symbol *
|
||||
Declarator::GetSymbolForFunctionParameter(int paramNum) const {
|
||||
Assert(paramNum < (int)functionParams.size());
|
||||
Declaration *d = functionParams[paramNum];
|
||||
|
||||
char buf[32];
|
||||
Symbol *sym;
|
||||
if (d->declarators.size() == 0) {
|
||||
// function declaration like foo(float), w/o a name for
|
||||
// the parameter
|
||||
sprintf(buf, "__anon_parameter_%d", paramNum);
|
||||
sym = new Symbol(buf, pos);
|
||||
sym->type = d->declSpecs->GetBaseType(pos);
|
||||
}
|
||||
else {
|
||||
Assert(d->declarators.size() == 1);
|
||||
sym = d->declarators[0]->GetSymbol();
|
||||
if (sym == NULL) {
|
||||
// Handle more complex anonymous declarations like
|
||||
// float (float **).
|
||||
sprintf(buf, "__anon_parameter_%d", paramNum);
|
||||
sym = new Symbol(buf, d->declarators[0]->pos);
|
||||
sym->type = d->declarators[0]->GetType(d->declSpecs);
|
||||
}
|
||||
}
|
||||
return sym;
|
||||
}
|
||||
|
||||
child->InitFromType(functionType, ds);
|
||||
type = child->type;
|
||||
name = child->name;
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Declaration
|
||||
@@ -529,6 +629,7 @@ Declaration::Declaration(DeclSpecs *ds, Declarator *d) {
|
||||
}
|
||||
|
||||
|
||||
|
||||
std::vector<VariableDeclaration>
|
||||
Declaration::GetVariableDeclarations() const {
|
||||
Assert(declSpecs->storageClass != SC_TYPEDEF);
|
||||
@@ -536,18 +637,23 @@ Declaration::GetVariableDeclarations() const {
|
||||
|
||||
for (unsigned int i = 0; i < declarators.size(); ++i) {
|
||||
Declarator *decl = declarators[i];
|
||||
if (decl == NULL)
|
||||
if (decl == NULL || decl->type == NULL) {
|
||||
// Ignore earlier errors
|
||||
Assert(m->errorCount > 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
Symbol *sym = decl->GetSymbol();
|
||||
sym->type = sym->type->ResolveUnboundVariability(Type::Varying);
|
||||
|
||||
if (dynamic_cast<const FunctionType *>(sym->type) == NULL) {
|
||||
if (decl->type->IsVoidType())
|
||||
Error(decl->pos, "\"void\" type variable illegal in declaration.");
|
||||
else if (CastType<FunctionType>(decl->type) == NULL) {
|
||||
decl->type = decl->type->ResolveUnboundVariability(Variability::Varying);
|
||||
Symbol *sym = new Symbol(decl->name, decl->pos, decl->type,
|
||||
decl->storageClass);
|
||||
m->symbolTable->AddVariable(sym);
|
||||
vars.push_back(VariableDeclaration(sym, decl->initExpr));
|
||||
}
|
||||
}
|
||||
|
||||
return vars;
|
||||
}
|
||||
|
||||
@@ -558,18 +664,19 @@ Declaration::DeclareFunctions() {
|
||||
|
||||
for (unsigned int i = 0; i < declarators.size(); ++i) {
|
||||
Declarator *decl = declarators[i];
|
||||
if (decl == NULL)
|
||||
if (decl == NULL || decl->type == NULL) {
|
||||
// Ignore earlier errors
|
||||
Assert(m->errorCount > 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
Symbol *sym = decl->GetSymbol();
|
||||
sym->type = sym->type->ResolveUnboundVariability(Type::Varying);
|
||||
|
||||
if (dynamic_cast<const FunctionType *>(sym->type) == NULL)
|
||||
const FunctionType *ftype = CastType<FunctionType>(decl->type);
|
||||
if (ftype == NULL)
|
||||
continue;
|
||||
|
||||
bool isInline = (declSpecs->typeQualifiers & TYPEQUAL_INLINE);
|
||||
m->AddFunctionDeclaration(sym, isInline);
|
||||
m->AddFunctionDeclaration(decl->name, ftype, decl->storageClass,
|
||||
isInline, decl->pos);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -583,13 +690,14 @@ Declaration::Print(int indent) const {
|
||||
declarators[i]->Print(indent+4);
|
||||
}
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void
|
||||
GetStructTypesNamesPositions(const std::vector<StructDeclaration *> &sd,
|
||||
std::vector<const Type *> *elementTypes,
|
||||
std::vector<std::string> *elementNames,
|
||||
std::vector<SourcePos> *elementPositions) {
|
||||
llvm::SmallVector<const Type *, 8> *elementTypes,
|
||||
llvm::SmallVector<std::string, 8> *elementNames,
|
||||
llvm::SmallVector<SourcePos, 8> *elementPositions) {
|
||||
std::set<std::string> seenNames;
|
||||
for (unsigned int i = 0; i < sd.size(); ++i) {
|
||||
const Type *type = sd[i]->type;
|
||||
@@ -599,35 +707,41 @@ GetStructTypesNamesPositions(const std::vector<StructDeclaration *> &sd,
|
||||
// FIXME: making this fake little DeclSpecs here is really
|
||||
// disgusting
|
||||
DeclSpecs ds(type);
|
||||
if (type->IsUniformType())
|
||||
ds.typeQualifiers |= TYPEQUAL_UNIFORM;
|
||||
else if (type->IsVaryingType())
|
||||
ds.typeQualifiers |= TYPEQUAL_VARYING;
|
||||
if (type->IsVoidType() == false) {
|
||||
if (type->IsUniformType())
|
||||
ds.typeQualifiers |= TYPEQUAL_UNIFORM;
|
||||
else if (type->IsVaryingType())
|
||||
ds.typeQualifiers |= TYPEQUAL_VARYING;
|
||||
else if (type->GetSOAWidth() != 0)
|
||||
ds.soaWidth = type->GetSOAWidth();
|
||||
// FIXME: ds.vectorSize?
|
||||
}
|
||||
|
||||
for (unsigned int j = 0; j < sd[i]->declarators->size(); ++j) {
|
||||
Declarator *d = (*sd[i]->declarators)[j];
|
||||
d->InitFromDeclSpecs(&ds);
|
||||
|
||||
Symbol *sym = d->GetSymbol();
|
||||
if (d->type->IsVoidType())
|
||||
Error(d->pos, "\"void\" type illegal for struct member.");
|
||||
|
||||
const ArrayType *arrayType =
|
||||
dynamic_cast<const ArrayType *>(sym->type);
|
||||
if (arrayType != NULL && arrayType->GetElementCount() == 0) {
|
||||
Error(d->pos, "Unsized arrays aren't allowed in struct "
|
||||
"definitions.");
|
||||
elementTypes->push_back(NULL);
|
||||
}
|
||||
else
|
||||
elementTypes->push_back(sym->type);
|
||||
elementTypes->push_back(d->type);
|
||||
|
||||
if (seenNames.find(sym->name) != seenNames.end())
|
||||
if (seenNames.find(d->name) != seenNames.end())
|
||||
Error(d->pos, "Struct member \"%s\" has same name as a "
|
||||
"previously-declared member.", sym->name.c_str());
|
||||
"previously-declared member.", d->name.c_str());
|
||||
else
|
||||
seenNames.insert(sym->name);
|
||||
seenNames.insert(d->name);
|
||||
|
||||
elementNames->push_back(sym->name);
|
||||
elementPositions->push_back(sym->pos);
|
||||
elementNames->push_back(d->name);
|
||||
elementPositions->push_back(d->pos);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < (int)elementTypes->size() - 1; ++i) {
|
||||
const ArrayType *arrayType = CastType<ArrayType>((*elementTypes)[i]);
|
||||
|
||||
if (arrayType != NULL && arrayType->GetElementCount() == 0)
|
||||
Error((*elementPositions)[i], "Unsized arrays aren't allowed except "
|
||||
"for the last member in a struct definition.");
|
||||
}
|
||||
}
|
||||
|
||||
66
decl.h
66
decl.h
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2010-2011, Intel Corporation
|
||||
Copyright (c) 2010-2013, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -28,7 +28,7 @@
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/** @file decl.h
|
||||
@@ -47,30 +47,21 @@
|
||||
variables--here, that the declaration has the 'static' and 'uniform'
|
||||
qualifiers, and that it's basic type is 'int'. Then for each variable
|
||||
declaration, the Declaraiton class holds an instance of a Declarator,
|
||||
which in turn records the per-variable information like the symbol
|
||||
name, array size (if any), initializer expression, etc.
|
||||
which in turn records the per-variable information like the name, array
|
||||
size (if any), initializer expression, etc.
|
||||
*/
|
||||
|
||||
#ifndef ISPC_DECL_H
|
||||
#define ISPC_DECL_H
|
||||
|
||||
#include "ispc.h"
|
||||
#include <llvm/ADT/SmallVector.h>
|
||||
|
||||
struct VariableDeclaration;
|
||||
|
||||
class Declaration;
|
||||
class Declarator;
|
||||
|
||||
enum StorageClass {
|
||||
SC_NONE,
|
||||
SC_EXTERN,
|
||||
SC_EXPORT,
|
||||
SC_STATIC,
|
||||
SC_TYPEDEF,
|
||||
SC_EXTERN_C
|
||||
};
|
||||
|
||||
|
||||
/* Multiple qualifiers can be provided with types in declarations;
|
||||
therefore, they are set up so that they can be ANDed together into an
|
||||
int. */
|
||||
@@ -82,6 +73,8 @@ enum StorageClass {
|
||||
#define TYPEQUAL_SIGNED (1<<4)
|
||||
#define TYPEQUAL_UNSIGNED (1<<5)
|
||||
#define TYPEQUAL_INLINE (1<<6)
|
||||
#define TYPEQUAL_EXPORT (1<<7)
|
||||
#define TYPEQUAL_UNMASKED (1<<8)
|
||||
|
||||
/** @brief Representation of the declaration specifiers in a declaration.
|
||||
|
||||
@@ -90,7 +83,8 @@ enum StorageClass {
|
||||
*/
|
||||
class DeclSpecs {
|
||||
public:
|
||||
DeclSpecs(const Type *t = NULL, StorageClass sc = SC_NONE, int tq = TYPEQUAL_NONE);
|
||||
DeclSpecs(const Type *t = NULL, StorageClass sc = SC_NONE,
|
||||
int tq = TYPEQUAL_NONE);
|
||||
|
||||
void Print() const;
|
||||
|
||||
@@ -117,6 +111,8 @@ public:
|
||||
SOA width specified. Otherwise this is zero.
|
||||
*/
|
||||
int soaWidth;
|
||||
|
||||
std::vector<std::pair<std::string, SourcePos> > declSpecList;
|
||||
};
|
||||
|
||||
|
||||
@@ -128,7 +124,7 @@ enum DeclaratorKind {
|
||||
DK_FUNCTION
|
||||
};
|
||||
|
||||
/** @brief Representation of the declaration of a single variable.
|
||||
/** @brief Representation of the declaration of a single variable.
|
||||
|
||||
In conjunction with an instance of the DeclSpecs, this gives us
|
||||
everything we need for a full variable declaration.
|
||||
@@ -138,25 +134,11 @@ public:
|
||||
Declarator(DeclaratorKind dk, SourcePos p);
|
||||
|
||||
/** Once a DeclSpecs instance is available, this method completes the
|
||||
initialization of the Symbol, setting its Type accordingly.
|
||||
initialization of the type member.
|
||||
*/
|
||||
void InitFromDeclSpecs(DeclSpecs *ds);
|
||||
|
||||
/** Get the actual type of the combination of Declarator and the given
|
||||
DeclSpecs. If an explicit base type is provided, the declarator is
|
||||
applied to that type; otherwise the base type from the DeclSpecs is
|
||||
used. */
|
||||
const Type *GetType(DeclSpecs *ds) const;
|
||||
const Type *GetType(const Type *base, DeclSpecs *ds) const;
|
||||
|
||||
/** Returns the symbol corresponding to the function declared by this
|
||||
declarator and symbols for its arguments in *args. */
|
||||
Symbol *GetFunctionInfo(DeclSpecs *ds, std::vector<Symbol *> *args);
|
||||
|
||||
Symbol *GetSymbolForFunctionParameter(int paramNum) const;
|
||||
|
||||
/** Returns the symbol associated with the declarator. */
|
||||
Symbol *GetSymbol() const;
|
||||
void InitFromType(const Type *base, DeclSpecs *ds);
|
||||
|
||||
void Print(int indent) const;
|
||||
|
||||
@@ -177,18 +159,24 @@ public:
|
||||
/** Type qualifiers provided with the declarator. */
|
||||
int typeQualifiers;
|
||||
|
||||
StorageClass storageClass;
|
||||
|
||||
/** For array declarators, this gives the declared size of the array.
|
||||
Unsized arrays have arraySize == 0. */
|
||||
Unsized arrays have arraySize == 0. */
|
||||
int arraySize;
|
||||
|
||||
/** Symbol associated with the declarator. */
|
||||
Symbol *sym;
|
||||
/** Name associated with the declarator. */
|
||||
std::string name;
|
||||
|
||||
/** Initialization expression for the variable. May be NULL. */
|
||||
Expr *initExpr;
|
||||
|
||||
/** Type of the declarator. This is NULL until InitFromDeclSpecs() or
|
||||
InitFromType() is called. */
|
||||
const Type *type;
|
||||
|
||||
/** For function declarations, this holds the Declaration *s for the
|
||||
funciton's parameters. */
|
||||
function's parameters. */
|
||||
std::vector<Declaration *> functionParams;
|
||||
};
|
||||
|
||||
@@ -233,8 +221,8 @@ struct StructDeclaration {
|
||||
/** Given a set of StructDeclaration instances, this returns the types of
|
||||
the elements of the corresponding struct and their names. */
|
||||
extern void GetStructTypesNamesPositions(const std::vector<StructDeclaration *> &sd,
|
||||
std::vector<const Type *> *elementTypes,
|
||||
std::vector<std::string> *elementNames,
|
||||
std::vector<SourcePos> *elementPositions);
|
||||
llvm::SmallVector<const Type *, 8> *elementTypes,
|
||||
llvm::SmallVector<std::string, 8> *elementNames,
|
||||
llvm::SmallVector<SourcePos, 8> *elementPositions);
|
||||
|
||||
#endif // ISPC_DECL_H
|
||||
|
||||
@@ -1,3 +1,604 @@
|
||||
=== v1.9.1 === (8 July 2016)
|
||||
|
||||
An ISPC update with new native AVX512 target for future Xeon CPUs and
|
||||
improvements for debugging, including new switch --dwarf-version to support
|
||||
debugging on old systems.
|
||||
|
||||
The release is based on patched LLVM 3.8.
|
||||
|
||||
=== v1.9.0 === (12 Feb 2016)
|
||||
|
||||
An ISPC release with AVX512 (KNL flavor) support and a number of bug fixes,
|
||||
based on fresh LLVM 3.8 backend.
|
||||
|
||||
For AVX512 two modes are supported - generic and native. For instructions on how
|
||||
to use them, please refer to the wiki. Going forward we assume that native mode
|
||||
is the primary way to get AVX512 support and that generic mode will be deprecated.
|
||||
If you observe significantly better performance in generic mode, please report
|
||||
it via github issues.
|
||||
|
||||
Starting this release we are shipping two versions on Windows:
|
||||
(1) for VS2013 and earlier releases
|
||||
(2) for VS2015 and newer releases
|
||||
The reason for doing this is the redesigned C run-time library in VS.
|
||||
An implementation of "print" ISPC standard library function relies on C runtime
|
||||
library, which has changed. If you are not using "print" function in your code,
|
||||
you are safe to use either version.
|
||||
|
||||
A new option was introduced to improve debugging: --no-omit-frame-pointer.
|
||||
|
||||
=== v1.8.2 === (29 May 2015)
|
||||
|
||||
An ISPC update with several important stability fixes and an experimental
|
||||
AVX512 support.
|
||||
|
||||
Current level of AVX512 support is targeting the new generation of Xeon Phi
|
||||
codename Knights Landing. It's implemented in two different ways: as generic and
|
||||
native target. Generic target is similar to KNC support and requires Intel C/C++
|
||||
Compiler (15.0 and newer) and is available in regular ISPC build, which is
|
||||
based on LLVM 3.6.1. For the native AVX512 target, we have a separate ISPC
|
||||
build, which is based on LLVM trunk (3.7). This build is less stable and has
|
||||
several known issues. Nevertheless, if you are interested in AVX512 support for
|
||||
your code, we encourage you to try it and report the bugs. We are actively working
|
||||
with LLVM maintainers to fix all AVX512 bugs, so your feedback is important for
|
||||
us and will ensure that bugs affecting your code are fixed by LLVM 3.7 release.
|
||||
|
||||
Other notable changes and fixes include:
|
||||
|
||||
* Broadwell support via --cpu=broadwell.
|
||||
|
||||
* Changed cpu naming to accept cpu codenames. Check help for more details.
|
||||
|
||||
* --cpu switch disallowed in multi-target mode.
|
||||
|
||||
* Alignment of structure fields (in generated header files) is changed to be
|
||||
more consistent regardless used C/C++ compiler.
|
||||
|
||||
* --dllexport switch is added on Windows to make non-static functions DLL
|
||||
export.
|
||||
|
||||
* --print-target switch is added to dump details of LLVM target machine.
|
||||
This may help you to debug issues with code generation for incorrect target
|
||||
(or more likely to ensure that code generation is done right).
|
||||
|
||||
* A bug was fixed, which triggered uniform statements to be executed with
|
||||
all-off mask under some circumstances.
|
||||
|
||||
* The restriction for using some uniform types as return type in multi-target
|
||||
mode with targets of different width was relaxed.
|
||||
|
||||
Also, if you are using ISPC for code generation for current generation of
|
||||
Xeon Phi (Knights Corner), the following changes are for you:
|
||||
|
||||
* A bunch of stability fixes for KNC.
|
||||
|
||||
* A bug, which affects projects with multiple ISPC source files compiled with generic
|
||||
target is fixed. As side effect, you may see multiple warnings about unused static
|
||||
functions - you need to add "-wd177" switch to ICC compiling generic output files.
|
||||
|
||||
The release includes LLVM 3.6.1 binaries for Linux, MacOS, Windows and Windows based
|
||||
cross-compiler for Sony PlayStation4. LLVM 3.5 based experimental Linux binary with
|
||||
NVPTX support (now supporting also K80).
|
||||
|
||||
Native AVX512 support is available in the set of less stable LLVM 3.7 based binaries
|
||||
for Linux, MacOS and Windows.
|
||||
|
||||
=== v1.8.1 === (31 December 2014)
|
||||
|
||||
A minor update of ``ispc`` with several important stability fixes, namely:
|
||||
|
||||
* Auto-dispatch mechanism is fixed in pre-built Linux binaries (it used to
|
||||
select too conservative target).
|
||||
|
||||
* Compile crash with "-O2 -g" is fixed.
|
||||
|
||||
Also KNC (Xeon Phi) support is further improved.
|
||||
|
||||
The release includes experimental build for Sony PlayStation4 target (Windows
|
||||
cross compiler), as well as NVPTX experimental support (64 bit Linux binaries
|
||||
only). Note that there might be NVPTX compilation fails with CUDA 7.0.
|
||||
|
||||
Similar to 1.8.0 all binaries are based on LLVM 3.5. MacOS binaries are built
|
||||
for MacOS 10.9 Mavericks. Linux binaries are compatible with kernel 2.6.32
|
||||
(ok for RHEL6) and later.
|
||||
|
||||
=== v1.8.0 === (16 October 2014)
|
||||
|
||||
A major new version of ISPC, which introduces experimental support for NVPTX
|
||||
target, brings numerous improvements to our KNC (Xeon Phi) support, introduces
|
||||
debugging support on Windows and fixes several bugs. We also ship experimental
|
||||
build for Sony PlayStation4 target in this release. Binaries for all platforms
|
||||
are based on LLVM 3.5.
|
||||
|
||||
Note that MacOS binaries are built for MacOS 10.9 Mavericks. Linux binaries are
|
||||
compatible with kernel 2.6.32 (ok for RHEL6) and later.
|
||||
|
||||
More details:
|
||||
|
||||
* Experimental NVPTX support is available for users of our binary distribution
|
||||
on Linux only at the moment. MacOS and Windows users willing to experiment
|
||||
with this target are welcome to build it from source. Note that GPU imposes
|
||||
some limitation on ISPC language, which are discussed in corresponding section
|
||||
of ISPC User's Guide. Implementation of NVPTX support was done by our
|
||||
contributor Evghenii Gaburov.
|
||||
|
||||
* KNC support was greatly extended in knc.h header file. Beyond new features
|
||||
there are stability fixes and changes for icc 15.0 compatibility. Stdlib
|
||||
prefetch functions were improved to map to KNC vector prefetches.
|
||||
|
||||
* PS4 experimental build is Windows to PS4 cross compiler, which disables arch
|
||||
and cpu selection (which are preset to PS4 hardware).
|
||||
|
||||
* Debug info support on Windows (compatible with VS2010, VS2012 and VS2013).
|
||||
|
||||
* Critical bug fix, which caused code generation for incorrect target, despite
|
||||
explicit target switches, under some conditions.
|
||||
|
||||
* Stability fix of the bug, which caused print() function to execute under
|
||||
all-off mask under some conditions.
|
||||
|
||||
=== v1.7.0 === (18 April 2014)
|
||||
|
||||
A major new version of ISPC with several language and library extensions and
|
||||
fixes in debug info support. Binaries for all platforms are based on patched
|
||||
version on LLVM 3.4. There also performance improvements beyond switchover to
|
||||
LLVM 3.4.
|
||||
|
||||
The list of language and library changes:
|
||||
|
||||
* Support for varying types in exported functions was added. See documentation
|
||||
for more details.
|
||||
|
||||
* get_programCount() function was moved from stdlib.ispc to
|
||||
examples/util/util.isph, which needs to be included somewhere in your
|
||||
project, if you want to use it.
|
||||
|
||||
* Library functions for saturated arithmetic were added. add/sub/mul/div
|
||||
operations are supported for signed and unsigned 8/16/32/64 integer types
|
||||
(both uniform and varying).
|
||||
|
||||
* The algorithm for selecting overloaded function was extended to cover more
|
||||
types of overloading. Handling of reference types in overloaded functions was
|
||||
fixed. The rules for selecting the best match were changed to match C++,
|
||||
which requires the function to be the best match for all parameters. In
|
||||
ambiguous cases, a warning is issued, but it will be converted to an error
|
||||
in the next release.
|
||||
|
||||
* Explicit typecasts between any two reference types were allowed.
|
||||
|
||||
* Implicit cast of pointer to const type to void* was disallowed.
|
||||
|
||||
The list of other notable changes is:
|
||||
|
||||
* Number of fixes for better debug info support.
|
||||
|
||||
* Memory corruption bug was fixed, which caused rare but not reproducible
|
||||
compile time fails.
|
||||
|
||||
* Alias analysis was enabled (more aggressive optimizations are expected).
|
||||
|
||||
* A bug involving inaccurate handling of "const" qualifier was fixed. As a
|
||||
result, more "const" qualifiers may appear in .h files, which may cause
|
||||
compilation errors.
|
||||
|
||||
=== v1.6.0 === (19 December 2013)
|
||||
|
||||
A major new version of ISPC with major improvements in performance and
|
||||
stability. Linux and MacOS binaries are based on patched version of LLVM 3.3,
|
||||
while Windows version is based on LLVM 3.4rc3. LLVM 3.4 significantly improves
|
||||
stability on Win32 platform, so we've decided not to wait for official LLVM 3.4
|
||||
release.
|
||||
|
||||
The list of the most significant changes is:
|
||||
|
||||
* New avx1-i32x4 target was added. It may play well for you, if you are focused
|
||||
on integer computations or FP unit in your hardware is 128 bit wide.
|
||||
|
||||
* Support for calculations in double precision was extended with two new
|
||||
targets avx1.1-i64x4 and avx2-i64x4.
|
||||
|
||||
* Language support for overloaded operators was added.
|
||||
|
||||
* New library shift() function was added, which is similar to rotate(), but is
|
||||
non-circular.
|
||||
|
||||
* The language was extended to accept 3 dimensional tasking - a syntactic sugar,
|
||||
which may facilitate programming of some tasks.
|
||||
|
||||
* Regression, which broke --opt=force-aligned-memory is fixed.
|
||||
|
||||
If you are not using pre-built binaries, you may notice the following changes:
|
||||
|
||||
* VS2012/VS2013 are supported.
|
||||
|
||||
* alloy.py (with -b switch) can build LLVM for you on any platform now
|
||||
(except MacOS 10.9, but we know about the problem and working on it).
|
||||
This is a preferred way to build LLVM for ISPC, as all required patches for
|
||||
better performance and stability will automatically apply.
|
||||
|
||||
* LLVM 3.5 (current trunk) is supported.
|
||||
|
||||
There are also multiple fixes for better performance and stability, most
|
||||
notable are:
|
||||
|
||||
* Fixed performance problem for x2 targets.
|
||||
|
||||
* Fixed a problem with incorrect vzeroupper insertion on AVX target on Win32.
|
||||
|
||||
=== v1.5.0 === (27 September 2013)
|
||||
|
||||
A major new version of ISPC with several new targets and important bug fixes.
|
||||
Here's a list of the most important changes, if you are using pre-built
|
||||
binaries (which are based on patched version of LLVM 3.3):
|
||||
|
||||
* The naming of targets was changed to explicitly include data type width and
|
||||
a number of threads in the gang. For example, avx2-i32x8 is avx2 target,
|
||||
which uses 32 bit types as a base and has 8 threads in a gang. Old naming
|
||||
scheme is still supported, but deprecated.
|
||||
|
||||
* New SSE4 targets for calculations based on 8 bit and 16 bit data types:
|
||||
sse4-i8x16 and sse4-i16x8.
|
||||
|
||||
* New AVX1 target for calculations based on 64 bit data types: avx1-i64x4.
|
||||
|
||||
* SVML support was extended and improved.
|
||||
|
||||
* Behavior of -g switch was changed to not affect optimization level.
|
||||
|
||||
* ISPC debug infrastructure was redesigned. See --help-dev for more info and
|
||||
enjoy capabilities of new --debug-phase=<value> and --off-phase=<value>
|
||||
switches.
|
||||
|
||||
* Fixed an auto-dispatch bug, which caused AVX code execution when OS doesn't
|
||||
support AVX (but hardware does).
|
||||
|
||||
* Fixed a bug, which discarded uniform/varying keyword in typedefs.
|
||||
|
||||
* Several performance regressions were fixed.
|
||||
|
||||
If you are building ISPC yourself, then following changes are also available
|
||||
to you:
|
||||
|
||||
* --cpu=slm for targeting Intel Atom codename Silvermont (if LLVM 3.4 is used).
|
||||
|
||||
* ARM NEON targets are available (if enabled in build system).
|
||||
|
||||
* --debug-ir=<value> is available to generate debug information based on LLVM
|
||||
IR (if LLVM 3.4 is used). In debugger you'll see LLVM IR instead of source
|
||||
code.
|
||||
|
||||
* A redesigned and improved test and configuration management system is
|
||||
available to facilitate the process of building LLVM and testing ISPC
|
||||
compiler.
|
||||
|
||||
Standard library changes/fixes:
|
||||
|
||||
* __pause() function was removed from standard library.
|
||||
|
||||
* Fixed reduce_[min|max]_[float|double] intrinsics, which were producing
|
||||
incorrect code under some conditions.
|
||||
|
||||
Language changes:
|
||||
|
||||
* By default a floating point constant without a suffix is a single precision
|
||||
constant (32 bit). A new suffix "d" was introduced to allow double precision
|
||||
constant (64 bit). Please refer to tests/double-consts.ispc for syntax
|
||||
examples.
|
||||
|
||||
=== v1.4.4 === (19 July 2013)
|
||||
|
||||
A minor version update with several stability fixes requested by the customers.
|
||||
|
||||
=== v1.4.3 === (25 June 2013)
|
||||
|
||||
A minor version update with several stability improvements:
|
||||
|
||||
* Two bugs were fixed (including a bug in LLVM) to improve stability on 32 bit
|
||||
platforms.
|
||||
|
||||
* A bug affecting several examples was fixed.
|
||||
|
||||
* --instrument switch is fixed.
|
||||
|
||||
All tests and examples now properly compile and execute on native targets on
|
||||
Unix platforms (Linux and MacOS).
|
||||
|
||||
=== v1.4.2 === (11 June 2013)
|
||||
|
||||
A minor version update with a few important changes:
|
||||
|
||||
* Stability fix for AVX2 target (Haswell) - problem with gather instructions was
|
||||
resolved in LLVM 3.4; if you build with LLVM 3.2 or 3.3, it's available in our
|
||||
repository (llvm_patches/r183327-AVX2-GATHER.patch) and needs to be applied
|
||||
manually.
|
||||
|
||||
* Stability fix for widespread issue on Win32 platform (#503).
|
||||
|
||||
* Performance improvements for Xeon Phi related to mask representation.
|
||||
|
||||
Also LLVM 3.3 has been released and now it's the recommended version for building ISPC.
|
||||
Precompiled binaries are also built with LLVM 3.3.
|
||||
|
||||
=== v1.4.1 === (28 May 2013)
|
||||
|
||||
A major new version of ispc has been released with stability and performance
|
||||
improvements on all supported platforms (Windows, Linux and MacOS).
|
||||
This version supports LLVM 3.1, 3.2, 3.3 and 3.4. The released binaries are built with 3.2.
|
||||
|
||||
New compiler features:
|
||||
|
||||
* ISPC memory allocation returns aligned memory with platform natural alignment
|
||||
of vector registers by default. Alignment can also be managed via
|
||||
--force-alignment=<value>.
|
||||
|
||||
Important bug fixes/changes:
|
||||
|
||||
* ISPC was fixed to be fully functional when built by GCC 4.7.
|
||||
|
||||
* Major cleanup of build and test scripts on Windows.
|
||||
|
||||
* Gather/scatter performance improvements on Xeon Phi.
|
||||
|
||||
* FMA instructions are enabled for AVX2 instruction set.
|
||||
|
||||
* Support of RDRAND instruction when available via library function rdrand (Ivy Bridge).
|
||||
|
||||
Release also contains numerous bug fixes and minor improvements.
|
||||
|
||||
=== v1.3.0 === (29 June 2012)
|
||||
|
||||
This is a major new release of ispc, with support for more compilation
|
||||
targets and a number of additions to the language. As usual, the quality
|
||||
of generated code has also been improved in a number of cases and a number
|
||||
of small bugs have been fixed.
|
||||
|
||||
New targets:
|
||||
|
||||
* This release provides "beta" support for compiling to Intel® Xeon
|
||||
Phi™ processor, code named Knights Corner, the first processor in
|
||||
the Intel® Many Integrated Core Architecture. See
|
||||
http://ispc.github.com/ispc.html#compiling-for-the-intel-xeon-phi-architecture
|
||||
for more details on this support.
|
||||
|
||||
* This release also has an "avx1.1" target, which provides support for the
|
||||
new instructions in the Intel Ivy Bridge microarchitecture.
|
||||
|
||||
New language features:
|
||||
|
||||
* The foreach_active statement allows iteration over the active program
|
||||
instances in a gang. (See
|
||||
http://ispc.github.com/ispc.html#iteration-over-active-program-instances-foreach-active)
|
||||
|
||||
* foreach_unique allows iterating over subsets of program instances in a
|
||||
gang that share the same value of a variable. (See
|
||||
http://ispc.github.com/ispc.html#iteration-over-unique-elements-foreach-unique)
|
||||
|
||||
* An "unmasked" function qualifier and statement in the language allow
|
||||
re-activating execution of all program instances in a gang. (See
|
||||
http://ispc.github.com/ispc.html#re-establishing-the-execution-mask)
|
||||
|
||||
Standard library updates:
|
||||
|
||||
* The seed_rng() function has been modified to take a "varying" seed value
|
||||
when a varying RNGState is being initialized.
|
||||
|
||||
* An isnan() function has been added, to check for floating-point "not a
|
||||
number" values.
|
||||
|
||||
* The float_to_srgb8() routine does high performance conversion of
|
||||
floating-point color values to SRGB8 format.
|
||||
|
||||
Other changes:
|
||||
|
||||
* A number of bugfixes have been made for compiler crashes with malformed
|
||||
programs.
|
||||
|
||||
* Floating-point comparisons are now "unordered", so that any comparison
|
||||
where one of the operands is a "not a number" value returns false. (This
|
||||
matches standard IEEE floating-point behavior.)
|
||||
|
||||
* The code generated for 'break' statements in "varying" loops has been
|
||||
improved for some common cases.
|
||||
|
||||
* Compile time and compiler memory use have both been improved,
|
||||
particularly for large input programs.
|
||||
|
||||
* A number of bugs have been fixed in the debugging information generated
|
||||
by the compiler when the "-g" command-line flag is used.
|
||||
|
||||
=== v1.2.2 === (20 April 2012)
|
||||
|
||||
This release includes a number of small additions to functionality and a
|
||||
number of bugfixes. New functionality includes:
|
||||
|
||||
* It's now possible to forward declare structures as in C/C++: "struct
|
||||
Foo;". After such a declaration, structs with pointers to "Foo" and
|
||||
functions that take pointers or references to Foo structs can be declared
|
||||
without the entire definition of Foo being available.
|
||||
|
||||
* New built-in types size_t, ptrdiff_t, and [u]intptr_t are now available,
|
||||
corresponding to the equivalent types in C.
|
||||
|
||||
* The standard library now provides atomic_swap*() and
|
||||
atomic_compare_exchange*() functions for void * types.
|
||||
|
||||
* The C++ backend has seen a number of improvements to the quality and
|
||||
readability of generated code.
|
||||
|
||||
A number of bugs have been fixed in this release as well. The most
|
||||
significant are:
|
||||
|
||||
* Fixed a bug where nested loops could cause a compiler crash in some
|
||||
circumstances (issues #240, and #229)
|
||||
|
||||
* Gathers could access invalid memory (and cause the program to crash) in
|
||||
some circumstances (#235)
|
||||
|
||||
* References to temporary values are now handled properly when passed to a
|
||||
function that takes a reference typed parameter.
|
||||
|
||||
* A case where incorrect code could be generated for compile-time-constant
|
||||
initializers has been fixed (#234).
|
||||
|
||||
=== v1.2.1 === (6 April 2012)
|
||||
|
||||
This release contains only minor new functionality and is mostly for many
|
||||
small bugfixes and improvements to error handling and error reporting.
|
||||
The new functionality that is present is:
|
||||
|
||||
* Significantly more efficient versions of the float / half conversion
|
||||
routines are now available in the standard library, thanks to Fabian
|
||||
Giesen.
|
||||
|
||||
* The last member of a struct can now be a zero-length array; this allows
|
||||
the trick of dynamically allocating enough storage for the struct and
|
||||
some number of array elements at the end of it.
|
||||
|
||||
Significant bugs fixed include:
|
||||
|
||||
* Issue #205: When a target ISA isn't specified, use the host system's
|
||||
capabilities to choose a target for which it will be able to run the
|
||||
generated code.
|
||||
|
||||
* Issues #215 and #217: Don't allocate storage for global variables that
|
||||
are declared "extern".
|
||||
|
||||
* Issue #197: Allow NULL as a default argument value in a function
|
||||
declaration.
|
||||
|
||||
* Issue #223: Fix bugs where taking the address of a function wouldn't work
|
||||
as expected.
|
||||
|
||||
* Issue #224: When there are overloaded variants of a function that take
|
||||
both reference and const reference parameters, give the non-const
|
||||
reference preference when matching values of that underlying type.
|
||||
|
||||
* Issue #225: An error is issued when a varying lvalue is assigned to a
|
||||
reference type (rather than crashing).
|
||||
|
||||
* Issue #193: Permit conversions from array types to void *, not just the
|
||||
pointer type of the underlying array element.
|
||||
|
||||
* Issue #199: Still evaluate expressions that are cast to (void).
|
||||
|
||||
The documentation has also been improved, with FAQs added to clarify some
|
||||
aspects of the ispc pointer model.
|
||||
|
||||
=== v1.2.0 === (20 March 2012)
|
||||
|
||||
This is a major new release of ispc, with a number of significant
|
||||
improvements to functionality, performance, and compiler robustness. It
|
||||
does, however, include three small changes to language syntax and semantics
|
||||
that may require changes to existing programs:
|
||||
|
||||
* Syntax for the "launch" keyword has been cleaned up; it's now no longer
|
||||
necessary to bracket the launched function call with angle brackets.
|
||||
(In other words, now use "launch foo();", rather than "launch < foo() >;".)
|
||||
|
||||
* When using pointers, the pointed-to data type is now "uniform" by
|
||||
default. Use the varying keyword to specify varying pointed-to types when
|
||||
needed. (i.e. "float *ptr" is a varying pointer to uniform float data,
|
||||
whereas previously it was a varying pointer to varying float values.)
|
||||
Use "varying float *" to specify a varying pointer to varying float data,
|
||||
and so forth.
|
||||
|
||||
* The details of "uniform" and "varying" and how they interact with struct
|
||||
types have been cleaned up. Now, when a struct type is declared, if the
|
||||
struct elements don't have explicit "uniform" or "varying" qualifiers,
|
||||
they are said to have "unbound" variability. When a struct type is
|
||||
instantiated, any unbound variability elements inherit the variability of
|
||||
the parent struct type. See http://ispc.github.com/ispc.html#struct-types
|
||||
for more details.
|
||||
|
||||
ispc has a new language feature that makes it much easier to use the
|
||||
efficient "(array of) structure of arrays" (AoSoA, or SoA) memory layout of
|
||||
data. A new "soa<n>" qualifier can be applied to structure types to
|
||||
specify an n-wide SoA version of the corresponding type. Array indexing
|
||||
and pointer operations with arrays SoA types automatically handles the
|
||||
two-stage indexing calculation to access the data. See
|
||||
http://ispc.github.com/ispc.html#structure-of-array-types for more details.
|
||||
|
||||
For more efficient access of data that is still in "array of structures"
|
||||
(AoS) format, ispc has a new "memory coalescing" optimization that
|
||||
automatically detects series of strided loads and/or gathers that can be
|
||||
transformed into a more efficient set of vector loads and shuffles. A
|
||||
diagnostic is emitted when this optimization is successfully applied.
|
||||
|
||||
Smaller changes in this release:
|
||||
|
||||
* The standard library now provides memcpy(), memmove() and memset()
|
||||
functions, as well as single-precision asin() and acos() functions.
|
||||
|
||||
* -I can now be specified on the command-line to specify a search path for
|
||||
#include files.
|
||||
|
||||
* A number of improvements have been made to error reporting from the
|
||||
parser, and a number of cases where malformed programs could cause the
|
||||
compiler to crash have been fixed.
|
||||
|
||||
* A number of small improvements to the quality and performance of generated
|
||||
code have been made, including finding more cases where 32-bit addressing
|
||||
calculations can be safely done on 64-bit systems and generating better
|
||||
code for initializer expressions.
|
||||
|
||||
=== v1.1.4 === (4 February 2012)
|
||||
|
||||
There are two major bugfixes for Windows in this release. First, a number
|
||||
of failures in AVX code generation on Windows have been fixed; AVX on
|
||||
Windows now has no known issues. Second, a longstanding bug in parsing 64-bit
|
||||
integer constants on Windows has been fixed.
|
||||
|
||||
This release features a new experimental scalar target, contributed by Gabe
|
||||
Weisz <gweisz@cs.cmu.edu>. This target ("--target=generic-1") compiles
|
||||
gangs of single program instances (i.e. programCount == 1); it can be
|
||||
useful for debugging ispc programs.
|
||||
|
||||
The compiler now supports dynamic memory allocation in ispc programs (with
|
||||
"new" and "delete" operators based on C++). See
|
||||
http://ispc.github.com/ispc.html#dynamic-memory-allocation in the
|
||||
documentation for more information.
|
||||
|
||||
ispc now performs "short circuit" evaluation of the || and && logical
|
||||
operators and the ? : selection operator. (This represents the correction
|
||||
of a major incompatibility with C.) Code like "(index < arraySize &&
|
||||
array[index] == 1)" thus now executes as in C, where "array[index]" won't
|
||||
be evaluated unless "index" is less than "arraySize".
|
||||
|
||||
The standard library now provides "local" atomic operations, which are
|
||||
atomic across the gang of program instances (but not across other gangs or
|
||||
other hardware threads). See the updated documentation on atomics for more
|
||||
information:
|
||||
http://ispc.github.com/ispc.html#atomic-operations-and-memory-fences.
|
||||
|
||||
The standard library now offers a clock() function, which returns a uniform
|
||||
int64 value that counts processor cycles; it can be used for
|
||||
fine-resolution timing measurements.
|
||||
|
||||
Finally (of limited interest now): ispc now supports the forthcoming AVX2
|
||||
instruction set, due with Haswell-generation CPUs. All tests and examples
|
||||
compile and execute correctly with AVX2. (Thanks specifically to Craig
|
||||
Topper and Nadav Rotem for work on AVX2 support in LLVM, which made this
|
||||
possible.)
|
||||
|
||||
=== v1.1.3 === (20 January 2012)
|
||||
|
||||
With this release, the language now supports "switch" statements, with the
|
||||
same semantics and syntax as in C.
|
||||
|
||||
This release includes fixes for two important performance related issues:
|
||||
the quality of code generated for "foreach" statements has been
|
||||
substantially improved (https://github.com/ispc/ispc/issues/151), and a
|
||||
performance regression with code for "gathers" that was introduced in
|
||||
v1.1.2 has been fixed in this release.
|
||||
|
||||
A number of other small bugs were fixed in this release as well, including
|
||||
one where invalid memory would sometimes be incorrectly accessed
|
||||
(https://github.com/ispc/ispc/issues/160).
|
||||
|
||||
Thanks to Jean-Luc Duprat for a number of patches that improve support for
|
||||
building on various platforms, and to Pierre-Antoine Lacaze for patches so
|
||||
that ispc builds under MinGW.
|
||||
|
||||
=== v1.1.2 === (9 January 2012)
|
||||
|
||||
The major new feature in this release is support for "generic" C++
|
||||
|
||||
@@ -1,11 +1,16 @@
|
||||
#!/bin/bash
|
||||
|
||||
rst2html=rst2html.py
|
||||
|
||||
for i in ispc perfguide faq; do
|
||||
rst2html.py --template=template.txt --link-stylesheet \
|
||||
$rst2html --template=template.txt --link-stylesheet \
|
||||
--stylesheet-path=css/style.css $i.rst > $i.html
|
||||
done
|
||||
|
||||
rst2html.py --template=template-perf.txt --link-stylesheet \
|
||||
$rst2html --template=template-news.txt --link-stylesheet \
|
||||
--stylesheet-path=css/style.css news.rst > news.html
|
||||
|
||||
$rst2html --template=template-perf.txt --link-stylesheet \
|
||||
--stylesheet-path=css/style.css perf.rst > perf.html
|
||||
|
||||
#rst2latex --section-numbering --documentclass=article --documentoptions=DIV=9,10pt,letterpaper ispc.txt > ispc.tex
|
||||
|
||||
405
docs/faq.rst
405
docs/faq.rst
@@ -1,10 +1,10 @@
|
||||
=============================================================
|
||||
Intel® SPMD Program Compiler Frequently Asked Questions (FAQ)
|
||||
=============================================================
|
||||
=====================================
|
||||
Frequently Asked Questions About ispc
|
||||
=====================================
|
||||
|
||||
This document includes a number of frequently (and not frequently) asked
|
||||
questions about ispc, the Intel® SPMD Program Compiler. The source to this
|
||||
document is in the file ``docs/faq.txt`` in the ``ispc`` source
|
||||
document is in the file ``docs/faq.rst`` in the ``ispc`` source
|
||||
distribution.
|
||||
|
||||
* Understanding ispc's Output
|
||||
@@ -14,11 +14,24 @@ distribution.
|
||||
+ `Why are there multiple versions of exported ispc functions in the assembly output?`_
|
||||
+ `How can I more easily see gathers and scatters in generated assembly?`_
|
||||
|
||||
* Running The Compiler
|
||||
|
||||
+ `Why is it required to use one of the "generic" targets with C++ output?`_
|
||||
+ `Why won't the compiler generate an object file or assembly output with the "generic" targets?`_
|
||||
|
||||
* Language Details
|
||||
|
||||
+ `What is the difference between "int *foo" and "int foo[]"?`_
|
||||
+ `Why are pointed-to types "uniform" by default?`_
|
||||
+ `What am I getting an error about assigning a varying lvalue to a reference type?`_
|
||||
|
||||
* Interoperability
|
||||
|
||||
+ `How can I supply an initial execution mask in the call from the application?`_
|
||||
+ `How can I generate a single binary executable with support for multiple instruction sets?`_
|
||||
+ `How can I determine at run-time which vector instruction set's instructions were selected to execute?`_
|
||||
+ `Is it possible to inline ispc functions in C/C++ code?`_
|
||||
+ `Why is it illegal to pass "varying" values from C/C++ to ispc functions?`_
|
||||
|
||||
* Programming Techniques
|
||||
|
||||
@@ -26,6 +39,8 @@ distribution.
|
||||
+ `How can a gang of program instances generate variable amounts of output efficiently?`_
|
||||
+ `Is it possible to use ispc for explicit vector programming?`_
|
||||
+ `How can I debug my ispc programs using Valgrind?`_
|
||||
+ `foreach statements generate more complex assembly than I'd expect; what's going on?`_
|
||||
+ `How do I launch an individual task for each active program instance?`_
|
||||
|
||||
Understanding ispc's Output
|
||||
===========================
|
||||
@@ -212,6 +227,174 @@ easier to understand:
|
||||
jmp ___pseudo_scatter_base_offsets32_32 ## TAILCALL
|
||||
|
||||
|
||||
Running The Compiler
|
||||
====================
|
||||
|
||||
Why is it required to use one of the "generic" targets with C++ output?
|
||||
-----------------------------------------------------------------------
|
||||
|
||||
The C++ output option transforms the provided ``ispc`` program source into
|
||||
C++ code where each basic operation in the program (addition, comparison,
|
||||
etc.) is represented as a function call to an as-yet-undefined function,
|
||||
chaining the results of these calls together to perform the required
|
||||
computations. It is then expected that the user will provide the
|
||||
implementation of these functions via a header file with ``inline``
|
||||
functions defined for each of these functions and then use a C++ compiler
|
||||
to generate a final object file. (Examples of these headers include
|
||||
``examples/intrinsics/sse4.h`` and ``examples/intrinsics/knc.h`` in the
|
||||
``ispc`` distribution.)
|
||||
|
||||
If a target other than one of the "generic" ones is used with C++ output,
|
||||
then the compiler will transform certain operations into particular code
|
||||
sequences that may not be desired for the actual final target; for example,
|
||||
SSE targets that don't have hardware "gather" instructions will transform a
|
||||
gather into a sequence of scalar load instructions. When this in turn is
|
||||
transformed to C++ code, the fact that the loads were originally a gather
|
||||
is lost, and the header file of function definitions wouldn't have a chance
|
||||
to map the "gather" to a target-specific operation, as the ``knc.h`` header
|
||||
does, for example. Thus, the "generic" targets exist to provide basic
|
||||
targets of various vector widths, without imposing any limitations on the
|
||||
final target's capabilities.
|
||||
|
||||
Why won't the compiler generate an object file or assembly output with the "generic" targets?
|
||||
---------------------------------------------------------------------------------------------
|
||||
|
||||
As described in the above FAQ entry, when compiling to the "generic"
|
||||
targets, ``ispc`` generates vector code for the source program that
|
||||
transforms every basic operation in the program (addition, comparison,
|
||||
etc.) into a separate function call.
|
||||
|
||||
While there is no fundamental reason that the compiler couldn't generate
|
||||
target-specific object code with a function call to an undefined function
|
||||
for each primitive operation, doing so wouldn't actually be useful in
|
||||
practice--providing definitions of these functions in a separate object
|
||||
file and actually performing function calls for each of them (versus
|
||||
turning them into inline function calls) would be a highly inefficient way
|
||||
to run the program.
|
||||
|
||||
Therefore, in the interests of encouraging the use of the system,
|
||||
these types of output are disallowed.
|
||||
|
||||
|
||||
Language Details
|
||||
================
|
||||
|
||||
What is the difference between "int \*foo" and "int foo[]"?
|
||||
-----------------------------------------------------------
|
||||
|
||||
In C and C++, declaring a function to take a parameter ``int *foo`` and
|
||||
``int foo[]`` results in the same type for the parameter. Both are
|
||||
pointers to integers. In ``ispc``, these are different types. The first
|
||||
one is a varying pointer to a uniform integer value in memory, while the
|
||||
second results in a uniform pointer to the start of an array of varying
|
||||
integer values in memory.
|
||||
|
||||
To understand why the first is a varying pointer to a uniform integer,
|
||||
first recall that types without explicit rate qualifiers (``uniform``,
|
||||
``varying``, or ``soa<>``) are ``varying`` by default. Second, recall from
|
||||
the `discussion of pointer types in the ispc User's Guide`_ that pointed-to
|
||||
types without rate qualifiers are ``uniform`` by default. (This second
|
||||
rule is discussed further below, in `Why are pointed-to types "uniform" by
|
||||
default?`_.) The type of ``int *foo`` follows from these.
|
||||
|
||||
.. _discussion of pointer types in the ispc User's Guide: ispc.html#pointer-types
|
||||
|
||||
Conversely, in a function body, ``int foo[10]`` represents a declaration of
|
||||
a 10-element array of varying ``int`` values. In that we'd certainly like
|
||||
to be able to pass such an array to a function that takes a ``int []``
|
||||
parameter, the natural type for an ``int []`` parameter is a uniform
|
||||
pointer to varying integer values.
|
||||
|
||||
In terms of compatibility with C/C++, it's unfortunate that this
|
||||
distinction exists, though any other set of rules seems to introduce more
|
||||
awkwardness than this one. (Though we're interested to hear ideas to
|
||||
improve these rules!).
|
||||
|
||||
Why are pointed-to types "uniform" by default?
|
||||
----------------------------------------------
|
||||
|
||||
In ``ispc``, types without rate qualifiers are "varying" by default, but
|
||||
types pointed to by pointers without rate qualifiers are "uniform" by
|
||||
default. Why this difference?
|
||||
|
||||
::
|
||||
|
||||
int foo; // no rate qualifier, "varying int".
|
||||
uniform int *foo; // pointer type has no rate qualifier, pointed-to does.
|
||||
// "varying pointer to uniform int".
|
||||
int *foo; // neither pointer type nor pointed-to type ("int") have
|
||||
// rate qualifiers. Pointer type is varying by default,
|
||||
// pointed-to is uniform. "varying pointer to uniform int".
|
||||
varying int *foo; // varying pointer to varying int
|
||||
|
||||
The first rule, having types without rate qualifiers be varying by default,
|
||||
is a default that keeps the number of "uniform" or "varying" qualifiers in
|
||||
``ispc`` programs low. Most ``ispc`` programs use mostly "varying"
|
||||
variables, so this rule allows most variables to be declared without also
|
||||
requiring rate qualifiers.
|
||||
|
||||
On a related note, this rule allows many C/C++ functions to be used to
|
||||
define equivalent functions in the SPMD execution model that ``ispc``
|
||||
provides with little or no modification:
|
||||
|
||||
::
|
||||
|
||||
// scalar add in C/C++, SPMD/vector add in ispc
|
||||
int add(int a, int b) { return a + b; }
|
||||
|
||||
This motivation also explains why ``uniform int *foo`` represents a varying
|
||||
pointer; having pointers be varying by default if they don't have rate
|
||||
qualifiers similarly helps with porting code from C/C++ to ``ispc``.
|
||||
|
||||
The trickier issue is why pointed-to types are "uniform" by default. In our
|
||||
experience, data in memory that is accessed via pointers is most often
|
||||
uniform; this generally includes all data that has been allocated and
|
||||
initialized by the C/C++ application code. In practice, "varying" types are
|
||||
more generally (but not exclusively) used for local data in ``ispc``
|
||||
functions. Thus, making the pointed-to type uniform by default leads to
|
||||
more concise code for the most common cases.
|
||||
|
||||
|
||||
What am I getting an error about assigning a varying lvalue to a reference type?
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Given code like the following:
|
||||
|
||||
::
|
||||
|
||||
uniform float a[...];
|
||||
int index = ...;
|
||||
float &r = a[index];
|
||||
|
||||
``ispc`` issues the error "Initializer for reference-type variable "r" must
|
||||
have a uniform lvalue type.". The underlying issue stems from how
|
||||
references are represented in the code generated by ``ispc``. Recall that
|
||||
``ispc`` supports both uniform and varying pointer types--a uniform pointer
|
||||
points to the same location in memory for all program instances in the
|
||||
gang, while a varying pointer allows each program instance to have its own
|
||||
pointer value.
|
||||
|
||||
References are represented as a pointer in the code generated by ``ispc``,
|
||||
though this is generally opaque to the user; in ``ispc``, they are
|
||||
specifically uniform pointers. This design decision was made so that given
|
||||
code like this:
|
||||
|
||||
::
|
||||
|
||||
extern void func(float &val);
|
||||
float foo = ...;
|
||||
func(foo);
|
||||
|
||||
Then the reference would be handled efficiently as a single pointer, rather
|
||||
than unnecessarily being turned into a gang-size of pointers.
|
||||
|
||||
However, an implication of this decision is that it's not possible for
|
||||
references to refer to completely different things for each of the program
|
||||
instances. (And hence the error that is issued). In cases where a unique
|
||||
per-program-instance pointer is needed, a varying pointer should be used
|
||||
instead of a reference.
|
||||
|
||||
|
||||
Interoperability
|
||||
================
|
||||
|
||||
@@ -346,6 +529,92 @@ In a similar fashion, it's possible to find out at run-time the value of
|
||||
export uniform int width() { return programCount; }
|
||||
|
||||
|
||||
Is it possible to inline ispc functions in C/C++ code?
|
||||
------------------------------------------------------
|
||||
|
||||
If you're willing to use the ``clang`` C/C++ compiler that's part of the
|
||||
LLVM tool suite, then it is possible to inline ``ispc`` code with C/C++
|
||||
(and conversely, to inline C/C++ calls in ``ispc``). Doing so can provide
|
||||
performance advantages when calling out to short functions written in the
|
||||
"other" language. Note that you don't need to use ``clang`` to compile all
|
||||
of your C/C++ code, but only for the files where you want to be able to
|
||||
inline. In order to do this, you must have a full installation of LLVM
|
||||
version 3.0 or later, including the ``clang`` compiler.
|
||||
|
||||
The basic approach is to have the various compilers emit LLVM intermediate
|
||||
representation (IR) code and to then use tools from LLVM to link together
|
||||
the IR from the compilers and then re-optimize it, which gives the LLVM
|
||||
optimizer the opportunity to do additional inlining and cross-function
|
||||
optimizations. If you have source files ``foo.ispc`` and ``foo.cpp``,
|
||||
first emit LLVM IR:
|
||||
|
||||
::
|
||||
|
||||
ispc --emit-llvm -o foo_ispc.bc foo.ispc
|
||||
clang -O2 -c -emit-llvm -o foo_cpp.bc foo.cpp
|
||||
|
||||
Next, link the two IR files into a single file and run the LLVM optimizer
|
||||
on the result:
|
||||
|
||||
::
|
||||
|
||||
llvm-link foo_ispc.bc foo_cpp.bc -o - | opt -O3 -o foo_opt.bc
|
||||
|
||||
And finally, generate a native object file:
|
||||
|
||||
::
|
||||
|
||||
llc -filetype=obj foo_opt.bc -o foo.o
|
||||
|
||||
This file can in turn be linked in with the rest of your object files when
|
||||
linking your application.
|
||||
|
||||
(Note that if you're using the AVX instruction set, you must provide the
|
||||
``-mattr=+avx`` flag to ``llc``.)
|
||||
|
||||
|
||||
Why is it illegal to pass "varying" values from C/C++ to ispc functions?
|
||||
------------------------------------------------------------------------
|
||||
|
||||
If any of the types in the parameter list to an exported function is
|
||||
"varying" (including recursively, and members of structure types, etc.),
|
||||
then ``ispc`` will issue an error and refuse to compile the function:
|
||||
|
||||
::
|
||||
|
||||
% echo "export int add(int x) { return ++x; }" | ispc
|
||||
<stdin>:1:12: Error: Illegal to return a "varying" type from exported function "add"
|
||||
<stdin>:1:20: Error: Varying parameter "x" is illegal in an exported function.
|
||||
|
||||
While there's no fundamental reason why this isn't possible, recall the
|
||||
definition of "varying" variables: they have one value for each program
|
||||
instance in the gang. As such, the number of values and amount of storage
|
||||
required to represent a varying variable depends on the gang size
|
||||
(i.e. ``programCount``), which can have different values depending on the
|
||||
compilation target.
|
||||
|
||||
``ispc`` therefore prohibits passing "varying" values between the
|
||||
application and the ``ispc`` program in order to prevent the
|
||||
application-side code from depending on a particular gang size, in order to
|
||||
encourage portability to different gang sizes. (A generally desirable
|
||||
programming practice.)
|
||||
|
||||
For cases where the size of data is actually fixed from the application
|
||||
side, the value can be passed via a pointer to a short ``uniform`` array,
|
||||
as follows:
|
||||
|
||||
::
|
||||
|
||||
export void add4(uniform int ptr[4]) {
|
||||
foreach (i = 0 ... 4)
|
||||
ptr[i]++;
|
||||
}
|
||||
|
||||
On the 4-wide SSE instruction set, this compiles to a single vector add
|
||||
instruction (and associated move instructions), while it still also
|
||||
efficiently computes the correct result on 8-wide AVX targets.
|
||||
|
||||
|
||||
Programming Techniques
|
||||
======================
|
||||
|
||||
@@ -480,3 +749,131 @@ you can use ``--target=sse4`` when compiling to run with ``valgrind``.
|
||||
Note that ``valgrind`` does not yet support programs that use the AVX
|
||||
instruction set.
|
||||
|
||||
foreach statements generate more complex assembly than I'd expect; what's going on?
|
||||
-----------------------------------------------------------------------------------
|
||||
|
||||
Given a simple ``foreach`` loop like the following:
|
||||
|
||||
::
|
||||
|
||||
void foo(uniform float a[], uniform int count) {
|
||||
foreach (i = 0 ... count)
|
||||
a[i] *= 2;
|
||||
}
|
||||
|
||||
|
||||
the ``ispc`` compiler generates approximately 40 instructions--why isn't
|
||||
the generated code simpler?
|
||||
|
||||
There are two main components to the code: one handles
|
||||
``programCount``-sized chunks of elements of the array, and the other
|
||||
handles any excess elements at the end of the array that don't completely
|
||||
fill a gang. The code for the main loop is essentially what one would
|
||||
expect: a vector of values is loaded from the array, the multiply is done,
|
||||
and the result is stored.
|
||||
|
||||
::
|
||||
|
||||
LBB0_2: ## %foreach_full_body
|
||||
movslq %edx, %rdx
|
||||
vmovups (%rdi,%rdx), %ymm1
|
||||
vmulps %ymm0, %ymm1, %ymm1
|
||||
vmovups %ymm1, (%rdi,%rdx)
|
||||
addl $32, %edx
|
||||
addl $8, %eax
|
||||
cmpl %ecx, %eax
|
||||
jl LBB0_2
|
||||
|
||||
|
||||
Then, there is a sequence of instructions that handles any additional
|
||||
elements at the end of the array. (These instructions don't execute if
|
||||
there aren't any left-over values to process, but they do lengthen the
|
||||
amount of generated code.)
|
||||
|
||||
::
|
||||
|
||||
## BB#4: ## %partial_inner_only
|
||||
vmovd %eax, %xmm0
|
||||
vinsertf128 $1, %xmm0, %ymm0, %ymm0
|
||||
vpermilps $0, %ymm0, %ymm0 ## ymm0 = ymm0[0,0,0,0,4,4,4,4]
|
||||
vextractf128 $1, %ymm0, %xmm3
|
||||
vmovd %esi, %xmm2
|
||||
vmovaps LCPI0_1(%rip), %ymm1
|
||||
vextractf128 $1, %ymm1, %xmm4
|
||||
vpaddd %xmm4, %xmm3, %xmm3
|
||||
# ....
|
||||
vmulps LCPI0_0(%rip), %ymm1, %ymm1
|
||||
vmaskmovps %ymm1, %ymm0, (%rdi,%rax)
|
||||
|
||||
|
||||
If you know that the number of elements to be processed will always be an
|
||||
exact multiple of the 8, 16, etc., then adding a simple assignment to
|
||||
``count`` like the one below gives the compiler enough information to be
|
||||
able to eliminate the code for the additional array elements.
|
||||
|
||||
::
|
||||
|
||||
void foo(uniform float a[], uniform int count) {
|
||||
// This assignment doesn't change the value of count
|
||||
// if it's a multiple of 16, but it gives the compiler
|
||||
// insight into this fact, allowing for simpler code to
|
||||
// be generated for the foreach loop.
|
||||
count = (count & ~(16-1));
|
||||
foreach (i = 0 ... count)
|
||||
a[i] *= 2;
|
||||
}
|
||||
|
||||
With this new version of ``foo()``, only the code for the first loop above
|
||||
is generated.
|
||||
|
||||
|
||||
How do I launch an individual task for each active program instance?
|
||||
--------------------------------------------------------------------
|
||||
|
||||
Recall from the `discussion of "launch" in the ispc User's Guide`_ that a
|
||||
``launch`` statement launches a single task corresponding to a single gang
|
||||
of executing program instances, where the indices of the active program
|
||||
instances are the same as were active when the ``launch`` statement
|
||||
executed.
|
||||
|
||||
.. _discussion of "launch" in the ispc User's Guide: ispc.html#task-parallelism-launch-and-sync-statements
|
||||
|
||||
In some situations, it's desirable to be able to launch an individual task
|
||||
for each executing program instance. For example, we might be performing
|
||||
an iterative computation where a subset of the program instances determine
|
||||
that an item they are responsible for requires additional processing.
|
||||
|
||||
::
|
||||
|
||||
bool itemNeedsMoreProcessing(int);
|
||||
int itemNum = ...;
|
||||
if (itemNeedsMoreProcessing(itemNum)) {
|
||||
// do additional work
|
||||
}
|
||||
|
||||
For performance reasons, it may be desirable to apply an entire gang's
|
||||
worth of computation to each item that needs additional processing;
|
||||
there may be available parallelism in this computation such that we'd like
|
||||
to process each of the items with SPMD computation.
|
||||
|
||||
In this case, the ``foreach_active`` and ``unmasked`` constructs can be
|
||||
applied together to accomplish this goal.
|
||||
|
||||
::
|
||||
|
||||
// do additional work
|
||||
task void doWork(uniform int index);
|
||||
foreach_active (index) {
|
||||
unmasked {
|
||||
launch doWork(extract(itemNum, index));
|
||||
}
|
||||
}
|
||||
|
||||
Recall that the body of the ``foreach_active`` loop runs once for each
|
||||
active program instance, with each active program instance's
|
||||
``programIndex`` value available in ``index`` in the above. In the loop,
|
||||
we can re-establish an "all on" execution mask, enabling execution in all
|
||||
of the program instances in the gang, such that execution in ``doWork()``
|
||||
starts with all instances running. (Alternatively, the ``unmasked`` block
|
||||
could be in the definition of ``doWork()``.)
|
||||
|
||||
|
||||
2505
docs/ispc.rst
2505
docs/ispc.rst
File diff suppressed because it is too large
Load Diff
179
docs/news.rst
Normal file
179
docs/news.rst
Normal file
@@ -0,0 +1,179 @@
|
||||
=========
|
||||
ispc News
|
||||
=========
|
||||
|
||||
ispc 1.9.1 is Released
|
||||
----------------------
|
||||
|
||||
An ``ispc`` release with new native AVX512 target for future Xeon CPUs and
|
||||
improvements for debugging. Release is based on patched version LLVM 3.8 backend.
|
||||
|
||||
For more details, please check `Release Notes`_.
|
||||
|
||||
.. _Release Notes: https://github.com/ispc/ispc/blob/master/docs/ReleaseNotes.txt
|
||||
|
||||
ispc 1.9.0 is Released
|
||||
----------------------
|
||||
|
||||
An ``ispc`` release with AVX512 (KNL flavor) support and a number of bug fixes,
|
||||
based on fresh LLVM 3.8 backend.
|
||||
|
||||
For more details, please check `Release Notes`_.
|
||||
|
||||
.. _Release Notes: https://github.com/ispc/ispc/blob/master/docs/ReleaseNotes.txt
|
||||
|
||||
ispc 1.8.2 is Released
|
||||
----------------------
|
||||
|
||||
An update of ``ispc`` with several important stability fixes and an experimental
|
||||
AVX512 support has been released. Binaries are based on LLVM 3.6.1. Binaries with
|
||||
native AVX512 support are based on LLVM 3.7 (r238198).
|
||||
|
||||
For more details, please check `Release Notes`_.
|
||||
|
||||
.. _Release Notes: https://github.com/ispc/ispc/blob/master/docs/ReleaseNotes.txt
|
||||
|
||||
ispc 1.8.1 is Released
|
||||
----------------------
|
||||
|
||||
A minor update of ``ispc`` with several important stability fixes has been
|
||||
released. Problem with auto-dispatch on Linux is fixed (affects only pre-built
|
||||
binaries), the problem with -O2 -g is also fixed. There are several
|
||||
improvements in Xeon Phi support. Similar to 1.8.0 all binaries are based on
|
||||
LLVM 3.5.
|
||||
|
||||
ispc 1.8.0 is Released
|
||||
----------------------
|
||||
|
||||
A major new version of ``ispc``, which introduces experimental support for NVPTX
|
||||
target, brings numerous improvements to our KNC (Xeon Phi) support, introduces
|
||||
debugging support on Windows and fixes several bugs. We also ship experimental
|
||||
build for Sony PlayStation4 target in this release. Binaries for all platforms
|
||||
are based on LLVM 3.5.
|
||||
|
||||
ispc 1.7.0 is Released
|
||||
----------------------
|
||||
|
||||
A major new version of ``ispc`` with several language and library extensions and
|
||||
fixes in debug info support. Binaries for all platforms are based on patched
|
||||
version on LLVM 3.4. There also performance improvements beyond switchover to
|
||||
LLVM 3.4.
|
||||
|
||||
ispc 1.6.0 is Released
|
||||
----------------------
|
||||
|
||||
A major update of ``ispc`` has been released. The main focus is on improved
|
||||
performance and stability. Several new targets were added. There are also
|
||||
a number of language and library extensions. Released binaries are based on
|
||||
patched LLVM 3.3 on Linux and MacOS and LLVM 3.4rc3 on Windows. Please refer
|
||||
to Release Notes for complete set of changes.
|
||||
|
||||
ispc 1.5.0 is Released
|
||||
----------------------
|
||||
|
||||
A major update of ``ispc`` has been released with several new targets available
|
||||
and bunch of performance and stability fixes. The released binaries are built
|
||||
with patched version of LLVM 3.3. Please refer to Release Notes for complete
|
||||
set of changes.
|
||||
|
||||
ispc 1.4.4 is Released
|
||||
----------------------
|
||||
|
||||
A minor update of ``ispc`` has been released with several stability improvements.
|
||||
The released binaries are built with patched version of LLVM 3.3. Since this
|
||||
release we also distribute 32 bit Linux binaries.
|
||||
|
||||
ispc 1.4.3 is Released
|
||||
----------------------
|
||||
|
||||
A minor update of ``ispc`` has been released with several stability improvements.
|
||||
All tests and examples now properly compile and execute on native targets on
|
||||
Unix platforms (Linux and MacOS).
|
||||
The released binaries are built with patched version of LLVM 3.3.
|
||||
|
||||
ispc 1.4.2 is Released
|
||||
----------------------
|
||||
|
||||
A minor update of ``ispc`` has been released with stability fix for AVX2
|
||||
(Haswell), fix for Win32 platform and performance improvements on Xeon Phi.
|
||||
As usual, it's available on all supported platforms (Windows, Linux and MacOS).
|
||||
This version supports LLVM 3.1, 3.2, 3.3 and 3.4, but now we are recommending
|
||||
to avoid 3.1, as it's known to contain a number of stability problems and we are
|
||||
planning to deprecate its support soon.
|
||||
The released binaries are built with 3.3.
|
||||
|
||||
ispc 1.4.1 is Released
|
||||
----------------------
|
||||
|
||||
A major new version of ``ispc`` has been released with stability and
|
||||
performance improvements on all supported platforms (Windows, Linux and MacOS).
|
||||
This version supports LLVM 3.1, 3.2, 3.3 and 3.4. The released binaries are
|
||||
built with 3.2.
|
||||
|
||||
ispc 1.3.0 is Released
|
||||
----------------------
|
||||
|
||||
A major new version of ``ispc`` has been released. In addition to a number
|
||||
of new language features, this release notably features initial support for
|
||||
compiling to the Intel Xeon Phi (Many Integrated Core) architecture.
|
||||
|
||||
ispc 1.2.1 is Released
|
||||
----------------------
|
||||
|
||||
This is a bugfix release, fixing approximately 20 bugs in the system and
|
||||
improving error handling and error reporting. New functionality includes
|
||||
very efficient float/half conversion routines thanks to Fabian
|
||||
Giesen. See the `1.2.1 release notes`_ for details.
|
||||
|
||||
.. _1.2.1 release notes: https://github.com/ispc/ispc/tree/master/docs/ReleaseNotes.txt
|
||||
|
||||
ispc 1.2.0 is Released
|
||||
-----------------------
|
||||
|
||||
A new major release was posted on March 20, 2012. This release includes
|
||||
significant new functionality for cleanly handling "structure of arrays"
|
||||
(SoA) data layout and a new model for how uniform and varying are handled
|
||||
with structure types.
|
||||
|
||||
Paper on ispc To Appear in InPar 2012
|
||||
-------------------------------------
|
||||
|
||||
A technical paper on ``ispc``, `ispc: A SPMD Compiler for High-Performance
|
||||
CPU Programming`_, by Matt Pharr and William R. Mark, has been accepted to
|
||||
the `InPar 2012`_ conference. This paper describes a number of the design
|
||||
features and key characteristics of the ``ispc`` implementation.
|
||||
|
||||
(© 2012 IEEE. Personal use of this material is permitted. Permission from
|
||||
IEEE must be obtained for all other uses, in any current or future media,
|
||||
including reprinting/republishing this material for advertising or
|
||||
promotional purposes, creating new collective works, for resale or
|
||||
redistribution to servers or lists, or reuse of any copyrighted component
|
||||
of this work in other works.).
|
||||
|
||||
.. _ispc\: A SPMD Compiler for High-Performance CPU Programming: https://github.com/downloads/ispc/ispc/ispc_inpar_2012.pdf
|
||||
.. _InPar 2012: http://innovativeparallel.org/
|
||||
|
||||
ispc 1.1.4 is Released
|
||||
----------------------
|
||||
|
||||
On February 4, 2012, the 1.1.4 release of ``ispc`` was posted; new features
|
||||
include ``new`` and ``delete`` for dynamic memory allocation in ``ispc``
|
||||
programs, "local" atomic operations in the standard library, and a new
|
||||
scalar compilation target. See the `1.1.4 release notes`_ for details.
|
||||
|
||||
.. _1.1.4 release notes: https://github.com/ispc/ispc/tree/master/docs/ReleaseNotes.txt
|
||||
|
||||
|
||||
ispc 1.1.3 is Released
|
||||
----------------------
|
||||
|
||||
With this release, the language now supports "switch" statements, with the same semantics and syntax as in C.
|
||||
|
||||
This release includes fixes for two important performance related issues:
|
||||
the quality of code generated for "foreach" statements has been
|
||||
substantially improved, and performance regression with code for "gathers"
|
||||
that was introduced in v1.1.2 has been fixed in this release.
|
||||
|
||||
Thanks to Jean-Luc Duprat for a number of patches that improve support for
|
||||
building on various platforms, and to Pierre-Antoine Lacaze for patches so
|
||||
that ispc builds under MinGW.
|
||||
@@ -13,6 +13,7 @@ the most out of ``ispc`` in practice.
|
||||
+ `Improving Control Flow Coherence With "foreach_tiled"`_
|
||||
+ `Using Coherent Control Flow Constructs`_
|
||||
+ `Use "uniform" Whenever Appropriate`_
|
||||
+ `Use "Structure of Arrays" Layout When Possible`_
|
||||
|
||||
* `Tips and Techniques`_
|
||||
|
||||
@@ -20,6 +21,7 @@ the most out of ``ispc`` in practice.
|
||||
+ `Avoid 64-bit Addressing Calculations When Possible`_
|
||||
+ `Avoid Computation With 8 and 16-bit Integer Types`_
|
||||
+ `Implementing Reductions Efficiently`_
|
||||
+ `Using "foreach_active" Effectively`_
|
||||
+ `Using Low-level Vector Tricks`_
|
||||
+ `The "Fast math" Option`_
|
||||
+ `"inline" Aggressively`_
|
||||
@@ -247,6 +249,76 @@ but it's always best to provide the compiler with as much help as possible
|
||||
to understand the actual form of your computation.
|
||||
|
||||
|
||||
Use "Structure of Arrays" Layout When Possible
|
||||
----------------------------------------------
|
||||
|
||||
In general, memory access performance (for both reads and writes) is best
|
||||
when the running program instances access a contiguous region of memory; in
|
||||
this case efficient vector load and store instructions can often be used
|
||||
rather than gathers and scatters. As an example of this issue, consider an
|
||||
array of a simple point datatype laid out and accessed in conventional
|
||||
"array of structures" (AOS) layout:
|
||||
|
||||
::
|
||||
|
||||
struct Point { float x, y, z; };
|
||||
uniform Point pts[...];
|
||||
float v = pts[programIndex].x;
|
||||
|
||||
In the above code, the access to ``pts[programIndex].x`` accesses
|
||||
non-sequential memory locations, due to the ``y`` and ``z`` values between
|
||||
the desired ``x`` values in memory. A "gather" is required to get the
|
||||
value of ``v``, with a corresponding decrease in performance.
|
||||
|
||||
If ``Point`` was defined as a "structure of arrays" (SOA) type, the access
|
||||
can be much more efficient:
|
||||
|
||||
::
|
||||
|
||||
struct Point8 { float x[8], y[8], z[8]; };
|
||||
uniform Point8 pts8[...];
|
||||
int majorIndex = programIndex / 8;
|
||||
int minorIndex = programIndex % 8;
|
||||
float v = pts8[majorIndex].x[minorIndex];
|
||||
|
||||
In this case, each ``Point8`` has 8 ``x`` values contiguous in memory
|
||||
before 8 ``y`` values and then 8 ``z`` values. If the gang size is 8 or
|
||||
less, the access for ``v`` will have the same value of ``majorIndex`` for
|
||||
all program instances and will access consecutive elements of the ``x[8]``
|
||||
array with a vector load. (For larger gang sizes, two 8-wide vector loads
|
||||
would be issued, which is also quite efficient.)
|
||||
|
||||
However, the syntax in the above code is messy; accessing SOA data in this
|
||||
fashion is much less elegant than the corresponding code for accessing the
|
||||
data with AOS layout. The ``soa`` qualifier in ``ispc`` can be used to
|
||||
cause the corresponding transformation to be made to the ``Point`` type,
|
||||
while preserving the clean syntax for data access that comes with AOS
|
||||
layout:
|
||||
|
||||
::
|
||||
|
||||
soa<8> Point pts[...];
|
||||
float v = pts[programIndex].x;
|
||||
|
||||
Thanks to having SOA layout as a first-class concept in the language's type
|
||||
system, it's easy to write functions that convert data between the
|
||||
layouts. For example, the ``aos_to_soa`` function below converts ``count``
|
||||
elements of the given ``Point`` type from AOS to 8-wide SOA layout. (It
|
||||
assumes that the caller has pre-allocated sufficient space in the
|
||||
``pts_soa`` output array.)
|
||||
|
||||
::
|
||||
|
||||
void aos_to_soa(uniform Point pts_aos[], uniform int count,
|
||||
soa<8> pts_soa[]) {
|
||||
foreach (i = 0 ... count)
|
||||
pts_soa[i] = pts_aos[i];
|
||||
}
|
||||
|
||||
Analogously, a function could be written to convert back from SOA to AOS if
|
||||
needed.
|
||||
|
||||
|
||||
Tips and Techniques
|
||||
===================
|
||||
|
||||
@@ -339,6 +411,12 @@ based on the index, it can be worth doing. See the example
|
||||
``examples/volume_rendering`` in the ``ispc`` distribution for the use of
|
||||
this technique in an instance where it is beneficial to performance.
|
||||
|
||||
Understanding Memory Read Coalescing
|
||||
------------------------------------
|
||||
|
||||
XXXX todo
|
||||
|
||||
|
||||
Avoid 64-bit Addressing Calculations When Possible
|
||||
--------------------------------------------------
|
||||
|
||||
@@ -433,6 +511,43 @@ values--very efficient code in the end.
|
||||
return reduce_add(sum);
|
||||
}
|
||||
|
||||
Using "foreach_active" Effectively
|
||||
----------------------------------
|
||||
|
||||
For high-performance code,
|
||||
|
||||
For example, consider this segment of code, from the introduction of
|
||||
``foreach_active`` in the ispc User's Guide:
|
||||
|
||||
::
|
||||
|
||||
uniform float array[...] = { ... };
|
||||
int index = ...;
|
||||
foreach_active (i) {
|
||||
++array[index];
|
||||
}
|
||||
|
||||
Here, ``index`` was assumed to possibly have the same value for multiple
|
||||
program instances, so the updates to ``array[index]`` are serialized by the
|
||||
``foreach_active`` statement in order to not have undefined results when
|
||||
``index`` values do collide.
|
||||
|
||||
The code generated by the compiler can be improved in this case by making
|
||||
it clear that only a single element of the array is accessed by
|
||||
``array[index]`` and that thus a general gather or scatter isn't required.
|
||||
Specifically, by using the ``extract()`` function from the standard library
|
||||
to extract the current program instance's value of ``index`` into a
|
||||
``uniform`` variable and then using that to index into ``array``, as below,
|
||||
more efficient code is generated.
|
||||
|
||||
::
|
||||
|
||||
foreach_active (instanceNum) {
|
||||
uniform int unifIndex = extract(index, instanceNum);
|
||||
++array[unifIndex];
|
||||
}
|
||||
|
||||
|
||||
Using Low-level Vector Tricks
|
||||
-----------------------------
|
||||
|
||||
@@ -547,7 +662,7 @@ gathers happen.)
|
||||
|
||||
extern "C" {
|
||||
void ISPCInstrument(const char *fn, const char *note,
|
||||
int line, int mask);
|
||||
int line, uint64_t mask);
|
||||
}
|
||||
|
||||
This function is passed the file name of the ``ispc`` file running, a short
|
||||
@@ -560,7 +675,7 @@ as follows:
|
||||
|
||||
::
|
||||
|
||||
ISPCInstrument("foo.ispc", "function entry", 55, 0xf);
|
||||
ISPCInstrument("foo.ispc", "function entry", 55, 0xfull);
|
||||
|
||||
This call indicates that at the currently executing program has just
|
||||
entered the function defined at line 55 of the file ``foo.ispc``, with a
|
||||
@@ -671,7 +786,7 @@ countries.
|
||||
|
||||
* Other names and brands may be claimed as the property of others.
|
||||
|
||||
Copyright(C) 2011, Intel Corporation. All rights reserved.
|
||||
Copyright(C) 2011-2016, Intel Corporation. All rights reserved.
|
||||
|
||||
|
||||
Optimization Notice
|
||||
|
||||
66
docs/template-news.txt
Normal file
66
docs/template-news.txt
Normal file
@@ -0,0 +1,66 @@
|
||||
%(head_prefix)s
|
||||
%(head)s
|
||||
<script type="text/javascript">
|
||||
|
||||
var _gaq = _gaq || [];
|
||||
_gaq.push(['_setAccount', 'UA-1486404-4']);
|
||||
_gaq.push(['_trackPageview']);
|
||||
|
||||
(function() {
|
||||
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
|
||||
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
|
||||
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
|
||||
})();
|
||||
|
||||
</script>
|
||||
%(stylesheet)s
|
||||
%(body_prefix)s
|
||||
<div id="wrap">
|
||||
<div id="wrap2">
|
||||
<div id="header">
|
||||
<h1 id="logo">Intel SPMD Program Compiler</h1>
|
||||
<div id="slogan">An open-source compiler for high-performance SIMD programming on
|
||||
the CPU</div>
|
||||
</div>
|
||||
<div id="nav">
|
||||
<div id="nbar">
|
||||
<ul>
|
||||
<li><a href="index.html">Overview</a></li>
|
||||
<li id="selected"><a href="news.html">News</a></li>
|
||||
<li><a href="features.html">Features</a></li>
|
||||
<li><a href="downloads.html">Downloads</a></li>
|
||||
<li><a href="documentation.html">Documentation</a></li>
|
||||
<li><a href="perf.html">Performance</a></li>
|
||||
<li><a href="contrib.html">Contributors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
<div id="content-wrap">
|
||||
<div id="sidebar">
|
||||
<div class="widgetspace">
|
||||
<h1>Resources</h1>
|
||||
<ul class="menu">
|
||||
<li><a href="http://github.com/ispc/ispc/">ispc page on github</a></li>
|
||||
<li><a href="http://groups.google.com/group/ispc-users/">ispc
|
||||
users mailing list</a></li>
|
||||
<li><a href="http://groups.google.com/group/ispc-dev/">ispc
|
||||
developers mailing list</a></li>
|
||||
<li><a href="http://github.com/ispc/ispc/wiki/">Wiki</a></li>
|
||||
<li><a href="http://github.com/ispc/ispc/issues/">Bug tracking</a></li>
|
||||
<li><a href="doxygen/index.html">Doxygen</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
%(body_pre_docinfo)s
|
||||
%(docinfo)s
|
||||
<div id="content">
|
||||
%(body)s
|
||||
</div>
|
||||
<div class="clearfix"></div>
|
||||
<div id="footer"> © 2011-2016 <strong>Intel Corporation</strong> | Valid <a href="http://validator.w3.org/check?uri=referer">XHTML</a> | <a href="http://jigsaw.w3.org/css-validator/check/referer">CSS</a> | ClearBlue by: <a href="http://www.themebin.com/">ThemeBin</a>
|
||||
<!-- Please Do Not remove this link, thank u -->
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
%(body_suffix)s
|
||||
@@ -26,10 +26,12 @@
|
||||
<div id="nbar">
|
||||
<ul>
|
||||
<li><a href="index.html">Overview</a></li>
|
||||
<li><a href="news.html">News</a></li>
|
||||
<li><a href="features.html">Features</a></li>
|
||||
<li><a href="downloads.html">Downloads</a></li>
|
||||
<li><a href="documentation.html">Documentation</a></li>
|
||||
<li id="selected"><a href="perf.html">Performance</a></li>
|
||||
<li><a href="contrib.html">Contributors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
@@ -45,8 +47,7 @@
|
||||
developers mailing list</a></li>
|
||||
<li><a href="http://github.com/ispc/ispc/wiki/">Wiki</a></li>
|
||||
<li><a href="http://github.com/ispc/ispc/issues/">Bug tracking</a></li>
|
||||
<li><a href="doxygen/index.html">Doxygen documentation of
|
||||
<tt>ispc</tt> source code</a></li>
|
||||
<li><a href="doxygen/index.html">Doxygen</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
@@ -56,7 +57,7 @@
|
||||
%(body)s
|
||||
</div>
|
||||
<div class="clearfix"></div>
|
||||
<div id="footer"> © 2011 <strong>Intel Corporation</strong> | Valid <a href="http://validator.w3.org/check?uri=referer">XHTML</a> | <a href="http://jigsaw.w3.org/css-validator/check/referer">CSS</a> | ClearBlue by: <a href="http://www.themebin.com/">ThemeBin</a>
|
||||
<div id="footer"> © 2011-2016 <strong>Intel Corporation</strong> | Valid <a href="http://validator.w3.org/check?uri=referer">XHTML</a> | <a href="http://jigsaw.w3.org/css-validator/check/referer">CSS</a> | ClearBlue by: <a href="http://www.themebin.com/">ThemeBin</a>
|
||||
<!-- Please Do Not remove this link, thank u -->
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -26,10 +26,12 @@
|
||||
<div id="nbar">
|
||||
<ul>
|
||||
<li><a href="index.html">Overview</a></li>
|
||||
<li><a href="news.html">News</a></li>
|
||||
<li><a href="features.html">Features</a></li>
|
||||
<li><a href="downloads.html">Downloads</a></li>
|
||||
<li id="selected"><a href="documentation.html">Documentation</a></li>
|
||||
<li><a href="perf.html">Performance</a></li>
|
||||
<li><a href="contrib.html">Contributors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
@@ -45,8 +47,7 @@
|
||||
developers mailing list</a></li>
|
||||
<li><a href="http://github.com/ispc/ispc/wiki/">Wiki</a></li>
|
||||
<li><a href="http://github.com/ispc/ispc/issues/">Bug tracking</a></li>
|
||||
<li><a href="doxygen/index.html">Doxygen documentation of
|
||||
<tt>ispc</tt> source code</a></li>
|
||||
<li><a href="doxygen/index.html">Doxygen</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
@@ -56,7 +57,7 @@
|
||||
%(body)s
|
||||
</div>
|
||||
<div class="clearfix"></div>
|
||||
<div id="footer"> © 2011 <strong>Intel Corporation</strong> | Valid <a href="http://validator.w3.org/check?uri=referer">XHTML</a> | <a href="http://jigsaw.w3.org/css-validator/check/referer">CSS</a> | ClearBlue by: <a href="http://www.themebin.com/">ThemeBin</a>
|
||||
<div id="footer"> © 2011-2016 <strong>Intel Corporation</strong> | Valid <a href="http://validator.w3.org/check?uri=referer">XHTML</a> | <a href="http://jigsaw.w3.org/css-validator/check/referer">CSS</a> | ClearBlue by: <a href="http://www.themebin.com/">ThemeBin</a>
|
||||
<!-- Please Do Not remove this link, thank u -->
|
||||
</div>
|
||||
</div>
|
||||
|
||||
11
doxygen.cfg
11
doxygen.cfg
@@ -31,7 +31,7 @@ PROJECT_NAME = "Intel SPMD Program Compiler"
|
||||
# This could be handy for archiving the generated documentation or
|
||||
# if some version control system is used.
|
||||
|
||||
PROJECT_NUMBER = 1.1.2
|
||||
PROJECT_NUMBER = 1.9.2dev
|
||||
|
||||
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
|
||||
# base path where the generated documentation will be put.
|
||||
@@ -581,10 +581,12 @@ WARN_LOGFILE =
|
||||
# directories like "/usr/src/myproject". Separate the files or directories
|
||||
# with spaces.
|
||||
|
||||
INPUT = builtins.h \
|
||||
INPUT = ast.h \
|
||||
builtins.h \
|
||||
ctx.h \
|
||||
decl.h \
|
||||
expr.h \
|
||||
func.h \
|
||||
ispc.h \
|
||||
llvmutil.h \
|
||||
module.h \
|
||||
@@ -593,10 +595,13 @@ INPUT = builtins.h \
|
||||
sym.h \
|
||||
type.h \
|
||||
util.h \
|
||||
ast.cpp \
|
||||
builtins.cpp \
|
||||
cbackend.cpp \
|
||||
ctx.cpp \
|
||||
decl.cpp \
|
||||
expr.cpp \
|
||||
func.cpp \
|
||||
ispc.cpp \
|
||||
llvmutil.cpp \
|
||||
main.cpp \
|
||||
@@ -608,7 +613,7 @@ INPUT = builtins.h \
|
||||
util.cpp \
|
||||
parse.yy \
|
||||
lex.ll \
|
||||
builtins-c.c
|
||||
builtins/builtins.c
|
||||
|
||||
# This tag can be used to specify the character encoding of the source files
|
||||
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
|
||||
|
||||
@@ -39,9 +39,6 @@ example implementation of this function that counts the number of times the
|
||||
callback is made and records some statistics about control flow coherence
|
||||
is provided in the instrument.cpp file.
|
||||
|
||||
*** Note: on Linux, this example currently hits an assertion in LLVM during
|
||||
*** compilation
|
||||
|
||||
|
||||
Deferred
|
||||
========
|
||||
@@ -76,6 +73,14 @@ This directory includes three implementations of the algorithm:
|
||||
light culling and shading.
|
||||
|
||||
|
||||
GMRES
|
||||
=====
|
||||
|
||||
An implementation of the generalized minimal residual method for solving
|
||||
sparse matrix equations.
|
||||
(http://en.wikipedia.org/wiki/Generalized_minimal_residual_method)
|
||||
|
||||
|
||||
Mandelbrot
|
||||
==========
|
||||
|
||||
@@ -110,6 +115,13 @@ This program implements both the Black-Scholes and Binomial options pricing
|
||||
models in both ispc and regular serial C++ code.
|
||||
|
||||
|
||||
Perfbench
|
||||
=========
|
||||
|
||||
This runs a number of microbenchmarks to measure system performance and
|
||||
code generation quality.
|
||||
|
||||
|
||||
RT
|
||||
==
|
||||
|
||||
@@ -134,6 +146,11 @@ This is a simple "hello world" type program that shows a ~10 line
|
||||
application program calling out to a ~5 line ispc program to do a simple
|
||||
computation.
|
||||
|
||||
Sort
|
||||
====
|
||||
This is a bucket sort of 32 bit unsigned integers.
|
||||
By default 1000000 random elements get sorted.
|
||||
Call ./sort N in order to sort N elements instead.
|
||||
|
||||
Volume
|
||||
======
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
EXAMPLE=ao
|
||||
CPP_SRC=ao.cpp ao_serial.cpp
|
||||
ISPC_SRC=ao.ispc
|
||||
ISPC_TARGETS=sse2,sse4,avx
|
||||
ISPC_IA_TARGETS=sse2-i32x4,sse4-i32x4,avx1-i32x8,avx2-i32x8,avx512knl-i32x16,avx512skx-i32x16
|
||||
ISPC_ARM_TARGETS=neon
|
||||
|
||||
include ../common.mk
|
||||
|
||||
@@ -60,7 +60,7 @@ using namespace ispc;
|
||||
|
||||
extern void ao_serial(int w, int h, int nsubsamples, float image[]);
|
||||
|
||||
static unsigned int test_iterations;
|
||||
static unsigned int test_iterations[] = {3, 7, 1};
|
||||
static unsigned int width, height;
|
||||
static unsigned char *img;
|
||||
static float *fimg;
|
||||
@@ -106,16 +106,20 @@ savePPM(const char *fname, int w, int h)
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
if (argc != 4) {
|
||||
if (argc < 3) {
|
||||
printf ("%s\n", argv[0]);
|
||||
printf ("Usage: ao [num test iterations] [width] [height]\n");
|
||||
printf ("Usage: ao [width] [height] [ispc iterations] [tasks iterations] [serial iterations]\n");
|
||||
getchar();
|
||||
exit(-1);
|
||||
}
|
||||
else {
|
||||
test_iterations = atoi(argv[1]);
|
||||
width = atoi (argv[2]);
|
||||
height = atoi (argv[3]);
|
||||
if (argc == 6) {
|
||||
for (int i = 0; i < 3; i++) {
|
||||
test_iterations[i] = atoi(argv[3 + i]);
|
||||
}
|
||||
}
|
||||
width = atoi (argv[1]);
|
||||
height = atoi (argv[2]);
|
||||
}
|
||||
|
||||
// Allocate space for output images
|
||||
@@ -127,18 +131,19 @@ int main(int argc, char **argv)
|
||||
// time for any of them.
|
||||
//
|
||||
double minTimeISPC = 1e30;
|
||||
for (unsigned int i = 0; i < test_iterations; i++) {
|
||||
for (unsigned int i = 0; i < test_iterations[0]; i++) {
|
||||
memset((void *)fimg, 0, sizeof(float) * width * height * 3);
|
||||
assert(NSUBSAMPLES == 2);
|
||||
|
||||
reset_and_start_timer();
|
||||
ao_ispc(width, height, NSUBSAMPLES, fimg);
|
||||
double t = get_elapsed_mcycles();
|
||||
printf("@time of ISPC run:\t\t\t[%.3f] million cycles\n", t);
|
||||
minTimeISPC = std::min(minTimeISPC, t);
|
||||
}
|
||||
|
||||
// Report results and save image
|
||||
printf("[aobench ispc]:\t\t\t[%.3f] M cycles (%d x %d image)\n",
|
||||
printf("[aobench ispc]:\t\t\t[%.3f] million cycles (%d x %d image)\n",
|
||||
minTimeISPC, width, height);
|
||||
savePPM("ao-ispc.ppm", width, height);
|
||||
|
||||
@@ -147,18 +152,19 @@ int main(int argc, char **argv)
|
||||
// minimum time for any of them.
|
||||
//
|
||||
double minTimeISPCTasks = 1e30;
|
||||
for (unsigned int i = 0; i < test_iterations; i++) {
|
||||
for (unsigned int i = 0; i < test_iterations[1]; i++) {
|
||||
memset((void *)fimg, 0, sizeof(float) * width * height * 3);
|
||||
assert(NSUBSAMPLES == 2);
|
||||
|
||||
reset_and_start_timer();
|
||||
ao_ispc_tasks(width, height, NSUBSAMPLES, fimg);
|
||||
double t = get_elapsed_mcycles();
|
||||
printf("@time of ISPC + TASKS run:\t\t\t[%.3f] million cycles\n", t);
|
||||
minTimeISPCTasks = std::min(minTimeISPCTasks, t);
|
||||
}
|
||||
|
||||
// Report results and save image
|
||||
printf("[aobench ispc + tasks]:\t\t[%.3f] M cycles (%d x %d image)\n",
|
||||
printf("[aobench ispc + tasks]:\t\t[%.3f] million cycles (%d x %d image)\n",
|
||||
minTimeISPCTasks, width, height);
|
||||
savePPM("ao-ispc-tasks.ppm", width, height);
|
||||
|
||||
@@ -167,16 +173,17 @@ int main(int argc, char **argv)
|
||||
// minimum time.
|
||||
//
|
||||
double minTimeSerial = 1e30;
|
||||
for (unsigned int i = 0; i < test_iterations; i++) {
|
||||
for (unsigned int i = 0; i < test_iterations[2]; i++) {
|
||||
memset((void *)fimg, 0, sizeof(float) * width * height * 3);
|
||||
reset_and_start_timer();
|
||||
ao_serial(width, height, NSUBSAMPLES, fimg);
|
||||
double t = get_elapsed_mcycles();
|
||||
printf("@time of serial run:\t\t\t\t[%.3f] million cycles\n", t);
|
||||
minTimeSerial = std::min(minTimeSerial, t);
|
||||
}
|
||||
|
||||
// Report more results, save another image...
|
||||
printf("[aobench serial]:\t\t[%.3f] M cycles (%d x %d image)\n", minTimeSerial,
|
||||
printf("[aobench serial]:\t\t[%.3f] million cycles (%d x %d image)\n", minTimeSerial,
|
||||
width, height);
|
||||
printf("\t\t\t\t(%.2fx speedup from ISPC, %.2fx speedup from ISPC + tasks)\n",
|
||||
minTimeSerial / minTimeISPC, minTimeSerial / minTimeISPCTasks);
|
||||
|
||||
@@ -50,7 +50,6 @@ struct Isect {
|
||||
struct Sphere {
|
||||
vec center;
|
||||
float radius;
|
||||
|
||||
};
|
||||
|
||||
struct Plane {
|
||||
@@ -82,8 +81,8 @@ static inline void vnormalize(vec &v) {
|
||||
}
|
||||
|
||||
|
||||
static inline void
|
||||
ray_plane_intersect(Isect &isect, Ray &ray, Plane &plane) {
|
||||
static void
|
||||
ray_plane_intersect(Isect &isect, Ray &ray, uniform Plane &plane) {
|
||||
float d = -dot(plane.p, plane.n);
|
||||
float v = dot(ray.dir, plane.n);
|
||||
|
||||
@@ -103,7 +102,7 @@ ray_plane_intersect(Isect &isect, Ray &ray, Plane &plane) {
|
||||
|
||||
|
||||
static inline void
|
||||
ray_sphere_intersect(Isect &isect, Ray &ray, Sphere &sphere) {
|
||||
ray_sphere_intersect(Isect &isect, Ray &ray, uniform Sphere &sphere) {
|
||||
vec rs = ray.org - sphere.center;
|
||||
|
||||
float B = dot(rs, ray.dir);
|
||||
@@ -124,7 +123,7 @@ ray_sphere_intersect(Isect &isect, Ray &ray, Sphere &sphere) {
|
||||
}
|
||||
|
||||
|
||||
static inline void
|
||||
static void
|
||||
orthoBasis(vec basis[3], vec n) {
|
||||
basis[2] = n;
|
||||
basis[1].x = 0.0; basis[1].y = 0.0; basis[1].z = 0.0;
|
||||
@@ -147,8 +146,8 @@ orthoBasis(vec basis[3], vec n) {
|
||||
}
|
||||
|
||||
|
||||
static inline float
|
||||
ambient_occlusion(Isect &isect, Plane &plane, Sphere spheres[3],
|
||||
static float
|
||||
ambient_occlusion(Isect &isect, uniform Plane &plane, uniform Sphere spheres[3],
|
||||
RNGState &rngstate) {
|
||||
float eps = 0.0001f;
|
||||
vec p, n;
|
||||
@@ -204,112 +203,52 @@ ambient_occlusion(Isect &isect, Plane &plane, Sphere spheres[3],
|
||||
static void ao_scanlines(uniform int y0, uniform int y1, uniform int w,
|
||||
uniform int h, uniform int nsubsamples,
|
||||
uniform float image[]) {
|
||||
static Plane plane = { { 0.0f, -0.5f, 0.0f }, { 0.f, 1.f, 0.f } };
|
||||
static Sphere spheres[3] = {
|
||||
static uniform Plane plane = { { 0.0f, -0.5f, 0.0f }, { 0.f, 1.f, 0.f } };
|
||||
static uniform Sphere spheres[3] = {
|
||||
{ { -2.0f, 0.0f, -3.5f }, 0.5f },
|
||||
{ { -0.5f, 0.0f, -3.0f }, 0.5f },
|
||||
{ { 1.0f, 0.0f, -2.2f }, 0.5f } };
|
||||
RNGState rngstate;
|
||||
|
||||
seed_rng(&rngstate, y0);
|
||||
seed_rng(&rngstate, programIndex + (y0 << (programIndex & 15)));
|
||||
float invSamples = 1.f / nsubsamples;
|
||||
|
||||
// Compute the mapping between the 'programCount'-wide program
|
||||
// instances running in parallel and samples in the image.
|
||||
//
|
||||
// For now, we'll always take four samples per pixel, so start by
|
||||
// initializing du and dv with offsets into subpixel samples. We'll
|
||||
// take care of further updating du and dv for the case where we're
|
||||
// doing more than 4 program instances in parallel shortly.
|
||||
uniform float uSteps[4] = { 0, 1, 0, 1 };
|
||||
uniform float vSteps[4] = { 0, 0, 1, 1 };
|
||||
float du = uSteps[programIndex % 4] / nsubsamples;
|
||||
float dv = vSteps[programIndex % 4] / nsubsamples;
|
||||
foreach_tiled(y = y0 ... y1, x = 0 ... w,
|
||||
u = 0 ... nsubsamples, v = 0 ... nsubsamples) {
|
||||
float du = (float)u * invSamples, dv = (float)v * invSamples;
|
||||
|
||||
// Now handle the case where we are able to do more than one pixel's
|
||||
// worth of work at once. nx records the number of pixels in the x
|
||||
// direction we do per iteration and ny the number in y.
|
||||
uniform int nx = 1, ny = 1;
|
||||
// Figure out x,y pixel in NDC
|
||||
float px = (x + du - (w / 2.0f)) / (w / 2.0f);
|
||||
float py = -(y + dv - (h / 2.0f)) / (h / 2.0f);
|
||||
float ret = 0.f;
|
||||
Ray ray;
|
||||
Isect isect;
|
||||
|
||||
// FIXME: We actually need ny to be 1 regardless of the decomposition,
|
||||
// since the task decomposition is one scanline high.
|
||||
ray.org = 0.f;
|
||||
|
||||
if (programCount == 8) {
|
||||
// Do two pixels at once in the x direction
|
||||
nx = 2;
|
||||
if (programIndex >= 4)
|
||||
// And shift the offsets for the second pixel's worth of work
|
||||
++du;
|
||||
}
|
||||
else if (programCount == 16) {
|
||||
nx = 4;
|
||||
ny = 1;
|
||||
if (programIndex >= 4 && programIndex < 8)
|
||||
++du;
|
||||
if (programIndex >= 8 && programIndex < 12)
|
||||
du += 2;
|
||||
if (programIndex >= 12)
|
||||
du += 3;
|
||||
}
|
||||
// Poor man's perspective projection
|
||||
ray.dir.x = px;
|
||||
ray.dir.y = py;
|
||||
ray.dir.z = -1.0;
|
||||
vnormalize(ray.dir);
|
||||
|
||||
// Now loop over all of the pixels, stepping in x and y as calculated
|
||||
// above. (Assumes that ny divides y and nx divides x...)
|
||||
for (uniform int y = y0; y < y1; y += ny) {
|
||||
for (uniform int x = 0; x < w; x += nx) {
|
||||
// Figure out x,y pixel in NDC
|
||||
float px = (x + du - (w / 2.0f)) / (w / 2.0f);
|
||||
float py = -(y + dv - (h / 2.0f)) / (h / 2.0f);
|
||||
float ret = 0.f;
|
||||
Ray ray;
|
||||
Isect isect;
|
||||
isect.t = 1.0e+17;
|
||||
isect.hit = 0;
|
||||
|
||||
ray.org = 0.f;
|
||||
for (uniform int snum = 0; snum < 3; ++snum)
|
||||
ray_sphere_intersect(isect, ray, spheres[snum]);
|
||||
ray_plane_intersect(isect, ray, plane);
|
||||
|
||||
// Poor man's perspective projection
|
||||
ray.dir.x = px;
|
||||
ray.dir.y = py;
|
||||
ray.dir.z = -1.0;
|
||||
vnormalize(ray.dir);
|
||||
// Note use of 'coherent' if statement; the set of rays we
|
||||
// trace will often all hit or all miss the scene
|
||||
cif (isect.hit) {
|
||||
ret = ambient_occlusion(isect, plane, spheres, rngstate);
|
||||
ret *= invSamples * invSamples;
|
||||
|
||||
isect.t = 1.0e+17;
|
||||
isect.hit = 0;
|
||||
|
||||
for (uniform int snum = 0; snum < 3; ++snum)
|
||||
ray_sphere_intersect(isect, ray, spheres[snum]);
|
||||
ray_plane_intersect(isect, ray, plane);
|
||||
|
||||
// Note use of 'coherent' if statement; the set of rays we
|
||||
// trace will often all hit or all miss the scene
|
||||
cif (isect.hit)
|
||||
ret = ambient_occlusion(isect, plane, spheres, rngstate);
|
||||
|
||||
// This is a little grungy; we have results for
|
||||
// programCount-worth of values. Because we're doing 2x2
|
||||
// subsamples, we need to peel them off in groups of four,
|
||||
// average the four values for each pixel, and update the
|
||||
// output image.
|
||||
//
|
||||
// Store the varying value to a uniform array of the same size.
|
||||
// See the discussion about communication among program
|
||||
// instances in the ispc user's manual for more discussion on
|
||||
// this idiom.
|
||||
uniform float retArray[programCount];
|
||||
retArray[programIndex] = ret;
|
||||
|
||||
// offset to the first pixel in the image
|
||||
uniform int offset = 3 * (y * w + x);
|
||||
for (uniform int p = 0; p < programCount; p += 4, offset += 3) {
|
||||
// Get the four sample values for this pixel
|
||||
uniform float sumret = retArray[p] + retArray[p+1] + retArray[p+2] +
|
||||
retArray[p+3];
|
||||
|
||||
// Normalize by number of samples taken
|
||||
sumret /= nsubsamples * nsubsamples;
|
||||
|
||||
// Store result in the image
|
||||
image[offset+0] = sumret;
|
||||
image[offset+1] = sumret;
|
||||
image[offset+2] = sumret;
|
||||
}
|
||||
int offset = 3 * (y * w + x);
|
||||
atomic_add_local(&image[offset], ret);
|
||||
atomic_add_local(&image[offset+1], ret);
|
||||
atomic_add_local(&image[offset+2], ret);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -329,5 +268,5 @@ static void task ao_task(uniform int width, uniform int height,
|
||||
|
||||
export void ao_ispc_tasks(uniform int w, uniform int h, uniform int nsubsamples,
|
||||
uniform float image[]) {
|
||||
launch[h] < ao_task(w, h, nsubsamples, image) >;
|
||||
launch[h] ao_task(w, h, nsubsamples, image);
|
||||
}
|
||||
|
||||
@@ -18,159 +18,17 @@
|
||||
<Platform>x64</Platform>
|
||||
</ProjectConfiguration>
|
||||
</ItemGroup>
|
||||
<PropertyGroup Label="Globals">
|
||||
<ProjectGuid>{F29204CA-19DF-4F3C-87D5-03F4EEDAAFEB}</ProjectGuid>
|
||||
<Keyword>Win32Proj</Keyword>
|
||||
<RootNamespace>aobench</RootNamespace>
|
||||
<ISPC_file>ao</ISPC_file>
|
||||
<default_targets>sse2,sse4,avx1-i32x8</default_targets>
|
||||
</PropertyGroup>
|
||||
<Import Project="..\common.props" />
|
||||
<ItemGroup>
|
||||
<ClCompile Include="ao.cpp" />
|
||||
<ClCompile Include="ao_serial.cpp" />
|
||||
<ClCompile Include="../tasksys.cpp" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<CustomBuild Include="ao.ispc">
|
||||
<FileType>Document</FileType>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename).obj -h $(TargetDir)%(Filename)_ispc.h --arch=x86 --target=sse2,sse4,avx
|
||||
</Command>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename).obj -h $(TargetDir)%(Filename)_ispc.h --target=sse2,sse4,avx
|
||||
</Command>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(TargetDir)%(Filename).obj;$(TargetDir)%(Filename)_sse2.obj;$(TargetDir)%(Filename)_sse4.obj;$(TargetDir)%(Filename)_avx.obj;$(TargetDir)%(Filename)_ispc.h</Outputs>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(TargetDir)%(Filename).obj;$(TargetDir)%(Filename)_sse2.obj;$(TargetDir)%(Filename)_sse4.obj;$(TargetDir)%(Filename)_avx.obj;$(TargetDir)%(Filename)_ispc.h</Outputs>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename).obj -h $(TargetDir)%(Filename)_ispc.h --arch=x86 --target=sse2,sse4,avx
|
||||
</Command>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Release|x64'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename).obj -h $(TargetDir)%(Filename)_ispc.h --target=sse2,sse4,avx
|
||||
</Command>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(TargetDir)%(Filename).obj;$(TargetDir)%(Filename)_sse2.obj;$(TargetDir)%(Filename)_sse4.obj;$(TargetDir)%(Filename)_avx.obj;$(TargetDir)%(Filename)_ispc.h</Outputs>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(TargetDir)%(Filename).obj;$(TargetDir)%(Filename)_sse2.obj;$(TargetDir)%(Filename)_sse4.obj;$(TargetDir)%(Filename)_avx.obj;$(TargetDir)%(Filename)_ispc.h</Outputs>
|
||||
</CustomBuild>
|
||||
</ItemGroup>
|
||||
<PropertyGroup Label="Globals">
|
||||
<ProjectGuid>{F29204CA-19DF-4F3C-87D5-03F4EEDAAFEB}</ProjectGuid>
|
||||
<Keyword>Win32Proj</Keyword>
|
||||
<RootNamespace>aobench</RootNamespace>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<ExecutablePath>$(ExecutablePath);$(ProjectDir)..\..</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
|
||||
<ImportGroup Label="ExtensionTargets">
|
||||
</ImportGroup>
|
||||
</Project>
|
||||
</Project>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
CXX=g++ -m64
|
||||
CXX=clang++ -m64
|
||||
CXXFLAGS=-Iobjs/ -g3 -Wall
|
||||
ISPC=ispc
|
||||
ISPCFLAGS=-O2 --instrument --arch=x86-64 --target=sse2
|
||||
@@ -14,13 +14,13 @@ dirs:
|
||||
clean:
|
||||
/bin/rm -rf objs *~ ao
|
||||
|
||||
ao: objs/ao.o objs/instrument.o objs/ao_ispc.o ../tasksys.cpp
|
||||
ao: objs/ao.o objs/instrument.o objs/ao_instrumented_ispc.o ../tasksys.cpp
|
||||
$(CXX) $(CXXFLAGS) -o $@ $^ -lm -lpthread
|
||||
|
||||
objs/%.o: %.cpp dirs
|
||||
$(CXX) $< $(CXXFLAGS) -c -o $@
|
||||
|
||||
objs/ao.o: objs/ao_ispc.h
|
||||
objs/ao.o: objs/ao_instrumented_ispc.h
|
||||
|
||||
objs/%_ispc.h objs/%_ispc.o: %.ispc dirs
|
||||
$(ISPC) $(ISPCFLAGS) $< -o objs/$*_ispc.o -h objs/$*_instrumented_ispc.h
|
||||
$(ISPC) $(ISPCFLAGS) $< -o objs/$*_ispc.o -h objs/$*_ispc.h
|
||||
|
||||
@@ -35,6 +35,8 @@
|
||||
#define NOMINMAX
|
||||
#pragma warning (disable: 4244)
|
||||
#pragma warning (disable: 4305)
|
||||
// preventing MSVC fopen() deprecation complaints
|
||||
#define _CRT_SECURE_NO_DEPRECATE
|
||||
#endif
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
@@ -211,7 +211,7 @@ static void ao_scanlines(uniform int y0, uniform int y1, uniform int w,
|
||||
{ { 1.0f, 0.0f, -2.2f }, 0.5f } };
|
||||
RNGState rngstate;
|
||||
|
||||
seed_rng(&rngstate, y0);
|
||||
seed_rng(&rngstate, programIndex + (y0 << (programIndex & 15)));
|
||||
|
||||
// Compute the mapping between the 'programCount'-wide program
|
||||
// instances running in parallel and samples in the image.
|
||||
@@ -329,5 +329,5 @@ static void task ao_task(uniform int width, uniform int height,
|
||||
|
||||
export void ao_ispc_tasks(uniform int w, uniform int h, uniform int nsubsamples,
|
||||
uniform float image[]) {
|
||||
launch[h] < ao_task(w, h, nsubsamples, image) >;
|
||||
launch[h] ao_task(w, h, nsubsamples, image);
|
||||
}
|
||||
@@ -18,157 +18,18 @@
|
||||
<Platform>x64</Platform>
|
||||
</ProjectConfiguration>
|
||||
</ItemGroup>
|
||||
<PropertyGroup Label="Globals">
|
||||
<ProjectGuid>{B3B4AE3D-6D5A-4CF9-AF5B-43CF2131B958}</ProjectGuid>
|
||||
<Keyword>Win32Proj</Keyword>
|
||||
<RootNamespace>aobench_instrumented</RootNamespace>
|
||||
<ISPC_file>ao_instrumented</ISPC_file>
|
||||
<default_targets>sse2</default_targets>
|
||||
<flags>--instrument</flags>
|
||||
</PropertyGroup>
|
||||
<Import Project="..\common.props" />
|
||||
<ItemGroup>
|
||||
<ClCompile Include="ao.cpp" />
|
||||
<ClCompile Include="instrument.cpp" />
|
||||
<ClCompile Include="../tasksys.cpp" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<CustomBuild Include="ao.ispc">
|
||||
<FileType>Document</FileType>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename)_instrumented.obj -h $(TargetDir)%(Filename)_instrumented_ispc.h --arch=x86 --instrument --target=sse2
|
||||
</Command>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename)_instrumented.obj -h $(TargetDir)%(Filename)_instrumented_ispc.h --instrument --target=sse2
|
||||
</Command>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(TargetDir)%(Filename)_instrumented.obj;$(TargetDir)%(Filename)_instrumented_ispc.h</Outputs>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(TargetDir)%(Filename)_instrumented.obj;$(TargetDir)%(Filename)_instrumented_ispc.h</Outputs>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename)_instrumented.obj -h $(TargetDir)%(Filename)_instrumented_ispc.h --arch=x86 --instrument --target=sse2
|
||||
</Command>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Release|x64'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename)_instrumented.obj -h $(TargetDir)%(Filename)_instrumented_ispc.h --instrument --target=sse2
|
||||
</Command>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(TargetDir)%(Filename)_instrumented.obj;$(TargetDir)%(Filename)_instrumented_ispc.h</Outputs>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(TargetDir)%(Filename)_instrumented.obj;$(TargetDir)%(Filename)_instrumented_ispc.h</Outputs>
|
||||
</CustomBuild>
|
||||
</ItemGroup>
|
||||
<PropertyGroup Label="Globals">
|
||||
<ProjectGuid>{B3B4AE3D-6D5A-4CF9-AF5B-43CF2131B958}</ProjectGuid>
|
||||
<Keyword>Win32Proj</Keyword>
|
||||
<RootNamespace>aobench_instrumented</RootNamespace>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
<PreBuildEventUseInBuild>true</PreBuildEventUseInBuild>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
<PreBuildEventUseInBuild>true</PreBuildEventUseInBuild>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
<PreBuildEventUseInBuild>true</PreBuildEventUseInBuild>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
<PreBuildEventUseInBuild>true</PreBuildEventUseInBuild>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
|
||||
<ImportGroup Label="ExtensionTargets">
|
||||
</ImportGroup>
|
||||
</Project>
|
||||
</Project>
|
||||
|
||||
@@ -34,6 +34,8 @@
|
||||
#include "instrument.h"
|
||||
#include <stdio.h>
|
||||
#include <assert.h>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
@@ -46,7 +48,7 @@ struct CallInfo {
|
||||
|
||||
static std::map<std::string, CallInfo> callInfo;
|
||||
|
||||
int countbits(int i) {
|
||||
int countbits(uint64_t i) {
|
||||
int ret = 0;
|
||||
while (i) {
|
||||
if (i & 0x1)
|
||||
@@ -60,14 +62,13 @@ int countbits(int i) {
|
||||
// Callback function that ispc compiler emits calls to when --instrument
|
||||
// command-line flag is given while compiling.
|
||||
void
|
||||
ISPCInstrument(const char *fn, const char *note, int line, int mask) {
|
||||
char sline[16];
|
||||
sprintf(sline, "%04d", line);
|
||||
std::string s = std::string(fn) + std::string("(") + std::string(sline) +
|
||||
std::string(") - ") + std::string(note);
|
||||
ISPCInstrument(const char *fn, const char *note, int line, uint64_t mask) {
|
||||
std::stringstream s;
|
||||
s << fn << "(" << std::setfill('0') << std::setw(4) << line << ") - "
|
||||
<< note;
|
||||
|
||||
// Find or create a CallInfo instance for this callsite.
|
||||
CallInfo &ci = callInfo[s];
|
||||
CallInfo &ci = callInfo[s.str()];
|
||||
|
||||
// And update its statistics...
|
||||
++ci.count;
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef INSTRUMENT_H
|
||||
@@ -36,8 +36,8 @@
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
extern "C" {
|
||||
void ISPCInstrument(const char *fn, const char *note, int line, int mask);
|
||||
extern "C" {
|
||||
void ISPCInstrument(const char *fn, const char *note, int line, uint64_t mask);
|
||||
}
|
||||
|
||||
void ISPCPrintInstrument();
|
||||
|
||||
@@ -1,20 +1,72 @@
|
||||
|
||||
TASK_CXX=../tasksys.cpp
|
||||
TASK_LIB=-lpthread
|
||||
TASK_OBJ=tasksys.o
|
||||
TASK_OBJ=objs/tasksys.o
|
||||
|
||||
CXX=clang++
|
||||
CXXFLAGS+=-Iobjs/ -O2
|
||||
CC=clang
|
||||
CCFLAGS+=-Iobjs/ -O2
|
||||
|
||||
CXX=g++
|
||||
CXXFLAGS=-Iobjs/ -O2 -m64
|
||||
LIBS=-lm $(TASK_LIB) -lstdc++
|
||||
ISPC=ispc -O2 --arch=x86-64 $(ISPC_FLAGS)
|
||||
ISPC_OBJS=$(addprefix objs/, $(ISPC_SRC:.ispc=)_ispc.o $(ISPC_SRC:.ispc=)_ispc_sse2.o \
|
||||
$(ISPC_SRC:.ispc=)_ispc_sse4.o $(ISPC_SRC:.ispc=)_ispc_avx.o)
|
||||
ISPC=ispc
|
||||
ISPC_FLAGS+=-O2
|
||||
ISPC_HEADER=objs/$(ISPC_SRC:.ispc=_ispc.h)
|
||||
CPP_OBJS=$(addprefix objs/, $(CPP_SRC:.cpp=.o) $(TASK_OBJ))
|
||||
|
||||
ARCH:=$(shell uname -m | sed -e s/x86_64/x86/ -e s/i686/x86/ -e s/arm.*/arm/ -e s/sa110/arm/)
|
||||
|
||||
ifeq ($(ARCH),x86)
|
||||
ISPC_OBJS=$(addprefix objs/, $(ISPC_SRC:.ispc=)_ispc.o)
|
||||
COMMA=,
|
||||
ifneq (,$(findstring $(COMMA),$(ISPC_IA_TARGETS)))
|
||||
#$(info multi-target detected: $(ISPC_IA_TARGETS))
|
||||
ifneq (,$(findstring sse2,$(ISPC_IA_TARGETS)))
|
||||
ISPC_OBJS+=$(addprefix objs/, $(ISPC_SRC:.ispc=)_ispc_sse2.o)
|
||||
endif
|
||||
ifneq (,$(findstring sse4,$(ISPC_IA_TARGETS)))
|
||||
ISPC_OBJS+=$(addprefix objs/, $(ISPC_SRC:.ispc=)_ispc_sse4.o)
|
||||
endif
|
||||
ifneq (,$(findstring avx1-,$(ISPC_IA_TARGETS)))
|
||||
ISPC_OBJS+=$(addprefix objs/, $(ISPC_SRC:.ispc=)_ispc_avx.o)
|
||||
endif
|
||||
ifneq (,$(findstring avx1.1,$(ISPC_IA_TARGETS)))
|
||||
ISPC_OBJS+=$(addprefix objs/, $(ISPC_SRC:.ispc=)_ispc_avx11.o)
|
||||
endif
|
||||
ifneq (,$(findstring avx2,$(ISPC_IA_TARGETS)))
|
||||
ISPC_OBJS+=$(addprefix objs/, $(ISPC_SRC:.ispc=)_ispc_avx2.o)
|
||||
endif
|
||||
ifneq (,$(findstring avx512knl,$(ISPC_IA_TARGETS)))
|
||||
ISPC_OBJS+=$(addprefix objs/, $(ISPC_SRC:.ispc=)_ispc_avx512knl.o)
|
||||
endif
|
||||
ifneq (,$(findstring avx512skx,$(ISPC_IA_TARGETS)))
|
||||
ISPC_OBJS+=$(addprefix objs/, $(ISPC_SRC:.ispc=)_ispc_avx512skx.o)
|
||||
endif
|
||||
endif
|
||||
ISPC_TARGETS=$(ISPC_IA_TARGETS)
|
||||
ARCH_BIT:=$(shell getconf LONG_BIT)
|
||||
ifeq ($(ARCH_BIT),32)
|
||||
ISPC_FLAGS += --arch=x86
|
||||
CXXFLAGS += -m32
|
||||
CCFLAGS += -m32
|
||||
else
|
||||
ISPC_FLAGS += --arch=x86-64
|
||||
CXXFLAGS += -m64
|
||||
CCFLAGS += -m64
|
||||
endif
|
||||
else ifeq ($(ARCH),arm)
|
||||
ISPC_OBJS=$(addprefix objs/, $(ISPC_SRC:.ispc=_ispc.o))
|
||||
ISPC_TARGETS=$(ISPC_ARM_TARGETS)
|
||||
else
|
||||
$(error Unknown architecture $(ARCH) from uname -m)
|
||||
endif
|
||||
|
||||
CPP_OBJS=$(addprefix objs/, $(CPP_SRC:.cpp=.o))
|
||||
CC_OBJS=$(addprefix objs/, $(CC_SRC:.c=.o))
|
||||
OBJS=$(CPP_OBJS) $(CC_OBJS) $(TASK_OBJ) $(ISPC_OBJS)
|
||||
|
||||
default: $(EXAMPLE)
|
||||
|
||||
all: $(EXAMPLE) $(EXAMPLE)-sse4 $(EXAMPLE)-generic16
|
||||
all: $(EXAMPLE) $(EXAMPLE)-sse4 $(EXAMPLE)-generic16 $(EXAMPLE)-scalar
|
||||
|
||||
.PHONY: dirs clean
|
||||
|
||||
@@ -24,24 +76,27 @@ dirs:
|
||||
objs/%.cpp objs/%.o objs/%.h: dirs
|
||||
|
||||
clean:
|
||||
/bin/rm -rf objs *~ $(EXAMPLE) $(EXAMPLE)-sse4 $(EXAMPLE)-generic16
|
||||
/bin/rm -rf objs *~ $(EXAMPLE) $(EXAMPLE)-sse4 $(EXAMPLE)-generic16 ref test
|
||||
|
||||
$(EXAMPLE): $(CPP_OBJS) $(ISPC_OBJS)
|
||||
$(EXAMPLE): $(OBJS)
|
||||
$(CXX) $(CXXFLAGS) -o $@ $^ $(LIBS)
|
||||
|
||||
objs/%.o: %.cpp dirs $(ISPC_HEADER)
|
||||
$(CXX) $< $(CXXFLAGS) -c -o $@
|
||||
|
||||
objs/%.o: %.c dirs $(ISPC_HEADER)
|
||||
$(CC) $< $(CCFLAGS) -c -o $@
|
||||
|
||||
objs/%.o: ../%.cpp dirs
|
||||
$(CXX) $< $(CXXFLAGS) -c -o $@
|
||||
|
||||
objs/$(EXAMPLE).o: objs/$(EXAMPLE)_ispc.h
|
||||
objs/$(EXAMPLE).o: objs/$(EXAMPLE)_ispc.h dirs
|
||||
|
||||
objs/%_ispc.h objs/%_ispc.o objs/%_ispc_sse2.o objs/%_ispc_sse4.o objs/%_ispc_avx.o: %.ispc
|
||||
$(ISPC) --target=$(ISPC_TARGETS) $< -o objs/$*_ispc.o -h objs/$*_ispc.h
|
||||
objs/%_ispc.h objs/%_ispc.o objs/%_ispc_sse2.o objs/%_ispc_sse4.o objs/%_ispc_avx.o objs/%_ispc_avx11.o objs/%_ispc_avx2.o objs/%_ispc_avx512knl.o objs/%_ispc_avx512skx.o : %.ispc dirs
|
||||
$(ISPC) $(ISPC_FLAGS) --target=$(ISPC_TARGETS) $< -o objs/$*_ispc.o -h objs/$*_ispc.h
|
||||
|
||||
objs/$(ISPC_SRC:.ispc=)_sse4.cpp: $(ISPC_SRC)
|
||||
$(ISPC) $< -o $@ --target=generic-4 --emit-c++ --c++-include-file=sse4.h
|
||||
$(ISPC) $(ISPC_FLAGS) $< -o $@ --target=generic-4 --emit-c++ --c++-include-file=sse4.h
|
||||
|
||||
objs/$(ISPC_SRC:.ispc=)_sse4.o: objs/$(ISPC_SRC:.ispc=)_sse4.cpp
|
||||
$(CXX) -I../intrinsics -msse4.2 $< $(CXXFLAGS) -c -o $@
|
||||
@@ -50,10 +105,16 @@ $(EXAMPLE)-sse4: $(CPP_OBJS) objs/$(ISPC_SRC:.ispc=)_sse4.o
|
||||
$(CXX) $(CXXFLAGS) -o $@ $^ $(LIBS)
|
||||
|
||||
objs/$(ISPC_SRC:.ispc=)_generic16.cpp: $(ISPC_SRC)
|
||||
$(ISPC) $< -o $@ --target=generic-16 --emit-c++ --c++-include-file=generic-16.h
|
||||
$(ISPC) $(ISPC_FLAGS) $< -o $@ --target=generic-16 --emit-c++ --c++-include-file=generic-16.h
|
||||
|
||||
objs/$(ISPC_SRC:.ispc=)_generic16.o: objs/$(ISPC_SRC:.ispc=)_generic16.cpp
|
||||
$(CXX) -I../intrinsics $< $(CXXFLAGS) -c -o $@
|
||||
|
||||
$(EXAMPLE)-generic16: $(CPP_OBJS) objs/$(ISPC_SRC:.ispc=)_generic16.o
|
||||
$(CXX) $(CXXFLAGS) -o $@ $^ $(LIBS)
|
||||
|
||||
objs/$(ISPC_SRC:.ispc=)_scalar.o: $(ISPC_SRC)
|
||||
$(ISPC) $(ISPC_FLAGS) $< -o $@ --target=generic-1
|
||||
|
||||
$(EXAMPLE)-scalar: $(CPP_OBJS) objs/$(ISPC_SRC:.ispc=)_scalar.o
|
||||
$(CXX) $(CXXFLAGS) -o $@ $^ $(LIBS)
|
||||
|
||||
176
examples/common.props
Normal file
176
examples/common.props
Normal file
@@ -0,0 +1,176 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<ItemGroup Label="ProjectConfigurations">
|
||||
<ProjectConfiguration Include="Debug|Win32">
|
||||
<Configuration>Debug</Configuration>
|
||||
<Platform>Win32</Platform>
|
||||
</ProjectConfiguration>
|
||||
<ProjectConfiguration Include="Debug|x64">
|
||||
<Configuration>Debug</Configuration>
|
||||
<Platform>x64</Platform>
|
||||
</ProjectConfiguration>
|
||||
<ProjectConfiguration Include="Release|Win32">
|
||||
<Configuration>Release</Configuration>
|
||||
<Platform>Win32</Platform>
|
||||
</ProjectConfiguration>
|
||||
<ProjectConfiguration Include="Release|x64">
|
||||
<Configuration>Release</Configuration>
|
||||
<Platform>x64</Platform>
|
||||
</ProjectConfiguration>
|
||||
</ItemGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
<PlatformToolset>v110</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
<PlatformToolset>v110</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
<PlatformToolset>v110</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
<PlatformToolset>v110</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<PropertyGroup Label="User">
|
||||
<ISPC_compiler Condition=" '$(ISPC_compiler)' == '' ">ispc</ISPC_compiler>
|
||||
<Target_str Condition=" '$(Target_str)' == '' ">$(default_targets)</Target_str>
|
||||
<Target_out>$(ISPC_file).obj</Target_out>
|
||||
<Target_out Condition="($(Target_str.Contains(',')) And $(Target_str.Contains('sse2')))">$(Target_out);$(ISPC_file)_sse2.obj</Target_out>
|
||||
<Target_out Condition="($(Target_str.Contains(',')) And $(Target_str.Contains('sse4')))">$(Target_out);$(ISPC_file)_sse4.obj</Target_out>
|
||||
<Target_out Condition="($(Target_str.Contains(',')) And $(Target_str.Contains('avx1-')))">$(Target_out);$(ISPC_file)_avx.obj</Target_out>
|
||||
<Target_out Condition="($(Target_str.Contains(',')) And $(Target_str.Contains('avx1.1')))">$(Target_out);$(ISPC_file)_avx11.obj</Target_out>
|
||||
<Target_out Condition="($(Target_str.Contains(',')) And $(Target_str.Contains('avx2')))">$(Target_out);$(ISPC_file)_avx2.obj</Target_out>
|
||||
</PropertyGroup>
|
||||
<ItemGroup>
|
||||
<CustomBuild Include='$(ISPC_file).ispc'>
|
||||
<FileType>Document</FileType>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(ISPC_compiler) -O0 %(Filename).ispc -o %(Filename).obj -h %(Filename)_ispc.h --arch=x86 --target=$(Target_str) -g $(flags)</Command>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(ISPC_compiler) -O0 %(Filename).ispc -o %(Filename).obj -h %(Filename)_ispc.h --target=$(Target_str) -g $(flags)</Command>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Target_out)</Outputs>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Target_out)</Outputs>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(ISPC_compiler) -O2 %(Filename).ispc -o %(Filename).obj -h %(Filename)_ispc.h --arch=x86 --target=$(Target_str) $(flags)</Command>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(ISPC_compiler) -O2 %(Filename).ispc -o %(Filename).obj -h %(Filename)_ispc.h --target=$(Target_str) $(flags)</Command>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Target_out)</Outputs>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Target_out)</Outputs>
|
||||
</CustomBuild>
|
||||
</ItemGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
|
||||
<ImportGroup Label="ExtensionTargets">
|
||||
</ImportGroup>
|
||||
</Project>
|
||||
@@ -2,7 +2,8 @@
|
||||
EXAMPLE=deferred_shading
|
||||
CPP_SRC=common.cpp main.cpp dynamic_c.cpp dynamic_cilk.cpp
|
||||
ISPC_SRC=kernels.ispc
|
||||
ISPC_TARGETS=sse2,sse4-x2,avx-x2
|
||||
ISPC_IA_TARGETS=sse2-i32x4,sse4-i32x8,avx1-i32x16,avx2-i32x16,avx512knl-i32x16,avx512skx-i32x16
|
||||
ISPC_ARM_TARGETS=neon
|
||||
ISPC_FLAGS=--opt=fast-math
|
||||
|
||||
include ../common.mk
|
||||
|
||||
@@ -204,6 +204,7 @@ void WriteFrame(const char *filename, const InputData *input,
|
||||
fprintf(out, "P6 %d %d 255\n", input->header.framebufferWidth,
|
||||
input->header.framebufferHeight);
|
||||
fwrite(framebufferAOS, imageBytes, 1, out);
|
||||
fclose(out);
|
||||
|
||||
lAlignedFree(framebufferAOS);
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<ItemGroup Label="ProjectConfigurations">
|
||||
<ProjectConfiguration Include="Debug|Win32">
|
||||
@@ -21,133 +21,11 @@
|
||||
<PropertyGroup Label="Globals">
|
||||
<ProjectGuid>{87f53c53-957e-4e91-878a-bc27828fb9eb}</ProjectGuid>
|
||||
<Keyword>Win32Proj</Keyword>
|
||||
<RootNamespace>mandelbrot</RootNamespace>
|
||||
<RootNamespace>deferred</RootNamespace>
|
||||
<ISPC_file>kernels</ISPC_file>
|
||||
<default_targets>sse2,sse4-x2,avx1-x2</default_targets>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
<LinkIncremental>true</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||
<LinkIncremental>false</LinkIncremental>
|
||||
<ExecutablePath>$(ProjectDir)..\..;$(ExecutablePath)</ExecutablePath>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>$(TargetDir)</AdditionalIncludeDirectories>
|
||||
<FloatingPointModel>Fast</FloatingPointModel>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<Import Project="..\common.props" />
|
||||
<ItemGroup>
|
||||
<ClCompile Include="common.cpp" />
|
||||
<ClCompile Include="dynamic_c.cpp" />
|
||||
@@ -155,24 +33,4 @@
|
||||
<ClCompile Include="main.cpp" />
|
||||
<ClCompile Include="../tasksys.cpp" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<CustomBuild Include="kernels.ispc">
|
||||
<FileType>Document</FileType>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename).obj -h $(TargetDir)%(Filename)_ispc.h --arch=x86 --target=sse2,sse4-x2,avx-x2
|
||||
</Command>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename).obj -h $(TargetDir)%(Filename)_ispc.h --target=sse2,sse4-x2,avx-x2
|
||||
</Command>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(TargetDir)%(Filename).obj;$(TargetDir)%(Filename)_sse2.obj;$(TargetDir)%(Filename)_sse4.obj;$(TargetDir)%(Filename)_avx.obj;$(TargetDir)%(Filename)_ispc.h</Outputs>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(TargetDir)%(Filename).obj;$(TargetDir)%(Filename)_sse2.obj;$(TargetDir)%(Filename)_sse4.obj;$(TargetDir)%(Filename)_avx.obj;$(TargetDir)%(Filename)_ispc.h</Outputs>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename).obj -h $(TargetDir)%(Filename)_ispc.h --arch=x86 --target=sse2,sse4-x2,avx-x2
|
||||
</Command>
|
||||
<Command Condition="'$(Configuration)|$(Platform)'=='Release|x64'">ispc -O2 %(Filename).ispc -o $(TargetDir)%(Filename).obj -h $(TargetDir)%(Filename)_ispc.h --target=sse2,sse4-x2,avx-x2
|
||||
</Command>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(TargetDir)%(Filename).obj;$(TargetDir)%(Filename)_sse2.obj;$(TargetDir)%(Filename)_sse4.obj;$(TargetDir)%(Filename)_avx.obj;$(TargetDir)%(Filename)_ispc.h</Outputs>
|
||||
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(TargetDir)%(Filename).obj;$(TargetDir)%(Filename)_sse2.obj;$(TargetDir)%(Filename)_sse4.obj;$(TargetDir)%(Filename)_avx.obj;$(TargetDir)%(Filename)_ispc.h</Outputs>
|
||||
</CustomBuild>
|
||||
</ItemGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
|
||||
<ImportGroup Label="ExtensionTargets">
|
||||
</ImportGroup>
|
||||
</Project>
|
||||
|
||||
@@ -35,35 +35,35 @@
|
||||
|
||||
struct InputDataArrays
|
||||
{
|
||||
uniform float * uniform zBuffer;
|
||||
uniform unsigned int16 * uniform normalEncoded_x; // half float
|
||||
uniform unsigned int16 * uniform normalEncoded_y; // half float
|
||||
uniform unsigned int16 * uniform specularAmount; // half float
|
||||
uniform unsigned int16 * uniform specularPower; // half float
|
||||
uniform unsigned int8 * uniform albedo_x; // unorm8
|
||||
uniform unsigned int8 * uniform albedo_y; // unorm8
|
||||
uniform unsigned int8 * uniform albedo_z; // unorm8
|
||||
uniform float * uniform lightPositionView_x;
|
||||
uniform float * uniform lightPositionView_y;
|
||||
uniform float * uniform lightPositionView_z;
|
||||
uniform float * uniform lightAttenuationBegin;
|
||||
uniform float * uniform lightColor_x;
|
||||
uniform float * uniform lightColor_y;
|
||||
uniform float * uniform lightColor_z;
|
||||
uniform float * uniform lightAttenuationEnd;
|
||||
float *zBuffer;
|
||||
unsigned int16 *normalEncoded_x; // half float
|
||||
unsigned int16 *normalEncoded_y; // half float
|
||||
unsigned int16 *specularAmount; // half float
|
||||
unsigned int16 *specularPower; // half float
|
||||
unsigned int8 *albedo_x; // unorm8
|
||||
unsigned int8 *albedo_y; // unorm8
|
||||
unsigned int8 *albedo_z; // unorm8
|
||||
float *lightPositionView_x;
|
||||
float *lightPositionView_y;
|
||||
float *lightPositionView_z;
|
||||
float *lightAttenuationBegin;
|
||||
float *lightColor_x;
|
||||
float *lightColor_y;
|
||||
float *lightColor_z;
|
||||
float *lightAttenuationEnd;
|
||||
};
|
||||
|
||||
struct InputHeader
|
||||
{
|
||||
uniform float cameraProj[4][4];
|
||||
uniform float cameraNear;
|
||||
uniform float cameraFar;
|
||||
float cameraProj[4][4];
|
||||
float cameraNear;
|
||||
float cameraFar;
|
||||
|
||||
uniform int32 framebufferWidth;
|
||||
uniform int32 framebufferHeight;
|
||||
uniform int32 numLights;
|
||||
uniform int32 inputDataChunkSize;
|
||||
uniform int32 inputDataArrayOffsets[idaNum];
|
||||
int32 framebufferWidth;
|
||||
int32 framebufferHeight;
|
||||
int32 numLights;
|
||||
int32 inputDataChunkSize;
|
||||
int32 inputDataArrayOffsets[idaNum];
|
||||
};
|
||||
|
||||
|
||||
@@ -158,38 +158,22 @@ IntersectLightsWithTileMinMax(
|
||||
uniform float gBufferScale_x = 0.5f * (float)gBufferWidth;
|
||||
uniform float gBufferScale_y = 0.5f * (float)gBufferHeight;
|
||||
|
||||
// Parallize across frustum planes.
|
||||
// We really only have four side planes here, but write the code to
|
||||
// handle programCount > 4 robustly
|
||||
uniform float frustumPlanes_xy[programCount];
|
||||
uniform float frustumPlanes_z[programCount];
|
||||
uniform float frustumPlanes_xy[4] = {
|
||||
-(cameraProj_11 * gBufferScale_x),
|
||||
(cameraProj_11 * gBufferScale_x),
|
||||
(cameraProj_22 * gBufferScale_y),
|
||||
-(cameraProj_22 * gBufferScale_y) };
|
||||
uniform float frustumPlanes_z[4] = {
|
||||
tileEndX - gBufferScale_x,
|
||||
-tileStartX + gBufferScale_x,
|
||||
tileEndY - gBufferScale_y,
|
||||
-tileStartY + gBufferScale_y };
|
||||
|
||||
// TODO: If programIndex < 4 here? Don't care about masking off the
|
||||
// rest but if interleaving ("x2" modes) the other lanes should ideally
|
||||
// not be emitted...
|
||||
{
|
||||
// This one is totally constant over the whole screen... worth pulling it up at all?
|
||||
float frustumPlanes_xy_v;
|
||||
frustumPlanes_xy_v = insert(frustumPlanes_xy_v, 0, -(cameraProj_11 * gBufferScale_x));
|
||||
frustumPlanes_xy_v = insert(frustumPlanes_xy_v, 1, (cameraProj_11 * gBufferScale_x));
|
||||
frustumPlanes_xy_v = insert(frustumPlanes_xy_v, 2, (cameraProj_22 * gBufferScale_y));
|
||||
frustumPlanes_xy_v = insert(frustumPlanes_xy_v, 3, -(cameraProj_22 * gBufferScale_y));
|
||||
|
||||
float frustumPlanes_z_v;
|
||||
frustumPlanes_z_v = insert(frustumPlanes_z_v, 0, tileEndX - gBufferScale_x);
|
||||
frustumPlanes_z_v = insert(frustumPlanes_z_v, 1, -tileStartX + gBufferScale_x);
|
||||
frustumPlanes_z_v = insert(frustumPlanes_z_v, 2, tileEndY - gBufferScale_y);
|
||||
frustumPlanes_z_v = insert(frustumPlanes_z_v, 3, -tileStartY + gBufferScale_y);
|
||||
|
||||
// Normalize
|
||||
float norm = rsqrt(frustumPlanes_xy_v * frustumPlanes_xy_v +
|
||||
frustumPlanes_z_v * frustumPlanes_z_v);
|
||||
frustumPlanes_xy_v *= norm;
|
||||
frustumPlanes_z_v *= norm;
|
||||
|
||||
// Save out for uniform use later
|
||||
frustumPlanes_xy[programIndex] = frustumPlanes_xy_v;
|
||||
frustumPlanes_z[programIndex] = frustumPlanes_z_v;
|
||||
for (uniform int i = 0; i < 4; ++i) {
|
||||
uniform float norm = rsqrt(frustumPlanes_xy[i] * frustumPlanes_xy[i] +
|
||||
frustumPlanes_z[i] * frustumPlanes_z[i]);
|
||||
frustumPlanes_xy[i] *= norm;
|
||||
frustumPlanes_z[i] *= norm;
|
||||
}
|
||||
|
||||
uniform int32 tileNumLights = 0;
|
||||
@@ -343,8 +327,8 @@ ShadeTile(
|
||||
|
||||
// Reconstruct normal from G-buffer
|
||||
float surface_normal_x, surface_normal_y, surface_normal_z;
|
||||
float normal_x = half_to_float_fast(inputData.normalEncoded_x[gBufferOffset]);
|
||||
float normal_y = half_to_float_fast(inputData.normalEncoded_y[gBufferOffset]);
|
||||
float normal_x = half_to_float(inputData.normalEncoded_x[gBufferOffset]);
|
||||
float normal_y = half_to_float(inputData.normalEncoded_y[gBufferOffset]);
|
||||
|
||||
float f = (normal_x - normal_x * normal_x) + (normal_y - normal_y * normal_y);
|
||||
float m = sqrt(4.0f * f - 1.0f);
|
||||
@@ -355,9 +339,9 @@ ShadeTile(
|
||||
|
||||
// Load other G-buffer parameters
|
||||
float surface_specularAmount =
|
||||
half_to_float_fast(inputData.specularAmount[gBufferOffset]);
|
||||
half_to_float(inputData.specularAmount[gBufferOffset]);
|
||||
float surface_specularPower =
|
||||
half_to_float_fast(inputData.specularPower[gBufferOffset]);
|
||||
half_to_float(inputData.specularPower[gBufferOffset]);
|
||||
float surface_albedo_x = Unorm8ToFloat32(inputData.albedo_x[gBufferOffset]);
|
||||
float surface_albedo_y = Unorm8ToFloat32(inputData.albedo_y[gBufferOffset]);
|
||||
float surface_albedo_z = Unorm8ToFloat32(inputData.albedo_z[gBufferOffset]);
|
||||
@@ -530,9 +514,9 @@ RenderStatic(uniform InputHeader &inputHeader,
|
||||
|
||||
// Launch a task to render each tile, each of which is MIN_TILE_WIDTH
|
||||
// by MIN_TILE_HEIGHT pixels.
|
||||
launch[num_groups] < RenderTile(num_groups_x, num_groups_y,
|
||||
inputHeader, inputData, visualizeLightCount,
|
||||
framebuffer_r, framebuffer_g, framebuffer_b) >;
|
||||
launch[num_groups] RenderTile(num_groups_x, num_groups_y,
|
||||
inputHeader, inputData, visualizeLightCount,
|
||||
framebuffer_r, framebuffer_g, framebuffer_b);
|
||||
}
|
||||
|
||||
|
||||
@@ -591,8 +575,6 @@ SplitTileMinMax(
|
||||
uniform float light_positionView_z_array[],
|
||||
uniform float light_attenuationEnd_array[],
|
||||
// Outputs
|
||||
// TODO: ISPC doesn't currently like multidimensionsal arrays so we'll do the
|
||||
// indexing math ourselves
|
||||
uniform int32 subtileIndices[],
|
||||
uniform int32 subtileIndicesPitch,
|
||||
uniform int32 subtileNumLights[]
|
||||
@@ -601,30 +583,20 @@ SplitTileMinMax(
|
||||
uniform float gBufferScale_x = 0.5f * (float)gBufferWidth;
|
||||
uniform float gBufferScale_y = 0.5f * (float)gBufferHeight;
|
||||
|
||||
// Parallize across frustum planes
|
||||
// Only have 2 frustum split planes here so may not be worth it, but
|
||||
// we'll do it for now for consistency
|
||||
uniform float frustumPlanes_xy[programCount];
|
||||
uniform float frustumPlanes_z[programCount];
|
||||
|
||||
// This one is totally constant over the whole screen... worth pulling it up at all?
|
||||
float frustumPlanes_xy_v;
|
||||
frustumPlanes_xy_v = insert(frustumPlanes_xy_v, 0, -(cameraProj_11 * gBufferScale_x));
|
||||
frustumPlanes_xy_v = insert(frustumPlanes_xy_v, 1, (cameraProj_22 * gBufferScale_y));
|
||||
|
||||
float frustumPlanes_z_v;
|
||||
frustumPlanes_z_v = insert(frustumPlanes_z_v, 0, tileMidX - gBufferScale_x);
|
||||
frustumPlanes_z_v = insert(frustumPlanes_z_v, 1, tileMidY - gBufferScale_y);
|
||||
uniform float frustumPlanes_xy[2] = { -(cameraProj_11 * gBufferScale_x),
|
||||
(cameraProj_22 * gBufferScale_y) };
|
||||
uniform float frustumPlanes_z[2] = { tileMidX - gBufferScale_x,
|
||||
tileMidY - gBufferScale_y };
|
||||
|
||||
// Normalize
|
||||
float norm = rsqrt(frustumPlanes_xy_v * frustumPlanes_xy_v +
|
||||
frustumPlanes_z_v * frustumPlanes_z_v);
|
||||
frustumPlanes_xy_v *= norm;
|
||||
frustumPlanes_z_v *= norm;
|
||||
|
||||
// Save out for uniform use later
|
||||
frustumPlanes_xy[programIndex] = frustumPlanes_xy_v;
|
||||
frustumPlanes_z[programIndex] = frustumPlanes_z_v;
|
||||
uniform float norm[2] = { rsqrt(frustumPlanes_xy[0] * frustumPlanes_xy[0] +
|
||||
frustumPlanes_z[0] * frustumPlanes_z[0]),
|
||||
rsqrt(frustumPlanes_xy[1] * frustumPlanes_xy[1] +
|
||||
frustumPlanes_z[1] * frustumPlanes_z[1]) };
|
||||
frustumPlanes_xy[0] *= norm[0];
|
||||
frustumPlanes_xy[1] *= norm[1];
|
||||
frustumPlanes_z[0] *= norm[0];
|
||||
frustumPlanes_z[1] *= norm[1];
|
||||
|
||||
// Initialize
|
||||
uniform int32 subtileLightOffset[4];
|
||||
|
||||
@@ -62,10 +62,16 @@
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
if (argc != 2) {
|
||||
printf("usage: deferred_shading <input_file (e.g. data/pp1280x720.bin)>\n");
|
||||
if (argc < 2) {
|
||||
printf("usage: deferred_shading <input_file (e.g. data/pp1280x720.bin)> [tasks iterations] [serial iterations]\n");
|
||||
return 1;
|
||||
}
|
||||
static unsigned int test_iterations[] = {5, 3, 500}; //last value is for nframes, it is scale.
|
||||
if (argc == 5) {
|
||||
for (int i = 0; i < 3; i++) {
|
||||
test_iterations[i] = atoi(argv[2 + i]);
|
||||
}
|
||||
}
|
||||
|
||||
InputData *input = CreateInputDataFromFile(argv[1]);
|
||||
if (!input) {
|
||||
@@ -81,16 +87,17 @@ int main(int argc, char** argv) {
|
||||
InitDynamicCilk(input);
|
||||
#endif // __cilk
|
||||
|
||||
int nframes = 5;
|
||||
int nframes = test_iterations[2];
|
||||
double ispcCycles = 1e30;
|
||||
for (int i = 0; i < 5; ++i) {
|
||||
for (unsigned int i = 0; i < test_iterations[0]; ++i) {
|
||||
framebuffer.clear();
|
||||
reset_and_start_timer();
|
||||
for (int j = 0; j < nframes; ++j)
|
||||
ispc::RenderStatic(&input->header, &input->arrays,
|
||||
ispc::RenderStatic(input->header, input->arrays,
|
||||
VISUALIZE_LIGHT_COUNT,
|
||||
framebuffer.r, framebuffer.g, framebuffer.b);
|
||||
double mcycles = get_elapsed_mcycles() / nframes;
|
||||
printf("@time of ISPC + TASKS run:\t\t\t[%.3f] million cycles\n", mcycles);
|
||||
ispcCycles = std::min(ispcCycles, mcycles);
|
||||
}
|
||||
printf("[ispc static + tasks]:\t\t[%.3f] million cycles to render "
|
||||
@@ -98,14 +105,16 @@ int main(int argc, char** argv) {
|
||||
input->header.framebufferWidth, input->header.framebufferHeight);
|
||||
WriteFrame("deferred-ispc-static.ppm", input, framebuffer);
|
||||
|
||||
nframes = 3;
|
||||
#ifdef __cilk
|
||||
double dynamicCilkCycles = 1e30;
|
||||
for (int i = 0; i < 5; ++i) {
|
||||
for (int i = 0; i < test_iterations[1]; ++i) {
|
||||
framebuffer.clear();
|
||||
reset_and_start_timer();
|
||||
for (int j = 0; j < nframes; ++j)
|
||||
DispatchDynamicCilk(input, &framebuffer);
|
||||
double mcycles = get_elapsed_mcycles() / nframes;
|
||||
printf("@time of serial run:\t\t\t[%.3f] million cycles\n", mcycles);
|
||||
dynamicCilkCycles = std::min(dynamicCilkCycles, mcycles);
|
||||
}
|
||||
printf("[ispc + Cilk dynamic]:\t\t[%.3f] million cycles to render image\n",
|
||||
@@ -114,12 +123,13 @@ int main(int argc, char** argv) {
|
||||
#endif // __cilk
|
||||
|
||||
double serialCycles = 1e30;
|
||||
for (int i = 0; i < 5; ++i) {
|
||||
for (unsigned int i = 0; i < test_iterations[1]; ++i) {
|
||||
framebuffer.clear();
|
||||
reset_and_start_timer();
|
||||
for (int j = 0; j < nframes; ++j)
|
||||
DispatchDynamicC(input, &framebuffer);
|
||||
double mcycles = get_elapsed_mcycles() / nframes;
|
||||
printf("@time of serial run:\t\t\t[%.3f] million cycles\n", mcycles);
|
||||
serialCycles = std::min(serialCycles, mcycles);
|
||||
}
|
||||
printf("[C++ serial dynamic, 1 core]:\t[%.3f] million cycles to render image\n",
|
||||
@@ -130,7 +140,7 @@ int main(int argc, char** argv) {
|
||||
printf("\t\t\t\t(%.2fx speedup from static ISPC, %.2fx from Cilk+ISPC)\n",
|
||||
serialCycles/ispcCycles, serialCycles/dynamicCilkCycles);
|
||||
#else
|
||||
printf("\t\t\t\t(%.2fx speedup from ISPC)\n", serialCycles/ispcCycles);
|
||||
printf("\t\t\t\t(%.2fx speedup from ISPC + tasks)\n", serialCycles/ispcCycles);
|
||||
#endif // __cilk
|
||||
|
||||
DeleteInputData(input);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
|
||||
Microsoft Visual Studio Solution File, Format Version 11.00
|
||||
# Visual Studio 2010
|
||||
Microsoft Visual Studio Solution File, Format Version 12.00
|
||||
# Visual Studio 2012
|
||||
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "simple", "simple\simple.vcxproj", "{947C5311-8B78-4D05-BEE4-BCF342D4B367}"
|
||||
EndProject
|
||||
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rt", "rt\rt.vcxproj", "{E787BC3F-2D2E-425E-A64D-4721E2FF3DC9}"
|
||||
@@ -23,6 +23,10 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "stencil", "stencil\stencil.
|
||||
EndProject
|
||||
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "deferred_shading", "deferred\deferred_shading.vcxproj", "{87F53C53-957E-4E91-878A-BC27828FB9EB}"
|
||||
EndProject
|
||||
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "perfbench", "perfbench\perfbench.vcxproj", "{D923BB7E-A7C8-4850-8FCF-0EB9CE35B4E8}"
|
||||
EndProject
|
||||
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sort", "sort\sort.vcxproj", "{6D3EF8C5-AE26-407B-9ECE-C27CB988D9C2}"
|
||||
EndProject
|
||||
Global
|
||||
GlobalSection(SolutionConfigurationPlatforms) = preSolution
|
||||
Debug|Win32 = Debug|Win32
|
||||
@@ -119,6 +123,22 @@ Global
|
||||
{87F53C53-957E-4E91-878A-BC27828FB9EB}.Release|Win32.Build.0 = Release|Win32
|
||||
{87F53C53-957E-4E91-878A-BC27828FB9EB}.Release|x64.ActiveCfg = Release|x64
|
||||
{87F53C53-957E-4E91-878A-BC27828FB9EB}.Release|x64.Build.0 = Release|x64
|
||||
{D923BB7E-A7C8-4850-8FCF-0EB9CE35B4E8}.Debug|Win32.ActiveCfg = Debug|Win32
|
||||
{D923BB7E-A7C8-4850-8FCF-0EB9CE35B4E8}.Debug|Win32.Build.0 = Debug|Win32
|
||||
{D923BB7E-A7C8-4850-8FCF-0EB9CE35B4E8}.Debug|x64.ActiveCfg = Debug|x64
|
||||
{D923BB7E-A7C8-4850-8FCF-0EB9CE35B4E8}.Debug|x64.Build.0 = Debug|x64
|
||||
{D923BB7E-A7C8-4850-8FCF-0EB9CE35B4E8}.Release|Win32.ActiveCfg = Release|Win32
|
||||
{D923BB7E-A7C8-4850-8FCF-0EB9CE35B4E8}.Release|Win32.Build.0 = Release|Win32
|
||||
{D923BB7E-A7C8-4850-8FCF-0EB9CE35B4E8}.Release|x64.ActiveCfg = Release|x64
|
||||
{D923BB7E-A7C8-4850-8FCF-0EB9CE35B4E8}.Release|x64.Build.0 = Release|x64
|
||||
{6D3EF8C5-AE26-407B-9ECE-C27CB988D9C2}.Debug|Win32.ActiveCfg = Debug|Win32
|
||||
{6D3EF8C5-AE26-407B-9ECE-C27CB988D9C2}.Debug|Win32.Build.0 = Debug|Win32
|
||||
{6D3EF8C5-AE26-407B-9ECE-C27CB988D9C2}.Debug|x64.ActiveCfg = Debug|x64
|
||||
{6D3EF8C5-AE26-407B-9ECE-C27CB988D9C2}.Debug|x64.Build.0 = Debug|x64
|
||||
{6D3EF8C5-AE26-407B-9ECE-C27CB988D9C2}.Release|Win32.ActiveCfg = Release|Win32
|
||||
{6D3EF8C5-AE26-407B-9ECE-C27CB988D9C2}.Release|Win32.Build.0 = Release|Win32
|
||||
{6D3EF8C5-AE26-407B-9ECE-C27CB988D9C2}.Release|x64.ActiveCfg = Release|x64
|
||||
{6D3EF8C5-AE26-407B-9ECE-C27CB988D9C2}.Release|x64.Build.0 = Release|x64
|
||||
EndGlobalSection
|
||||
GlobalSection(SolutionProperties) = preSolution
|
||||
HideSolutionNode = FALSE
|
||||
|
||||
9
examples/gmres/Makefile
Normal file
9
examples/gmres/Makefile
Normal file
@@ -0,0 +1,9 @@
|
||||
|
||||
EXAMPLE=gmres
|
||||
CPP_SRC=algorithm.cpp main.cpp matrix.cpp
|
||||
CC_SRC=mmio.c
|
||||
ISPC_SRC=matrix.ispc
|
||||
ISPC_IA_TARGETS=sse2-i32x4,sse4-i32x8,avx1-i32x16,avx2-i32x16,avx512knl-i32x16,avx512skx-i32x16
|
||||
ISPC_ARM_TARGETS=neon
|
||||
|
||||
include ../common.mk
|
||||
231
examples/gmres/algorithm.cpp
Normal file
231
examples/gmres/algorithm.cpp
Normal file
@@ -0,0 +1,231 @@
|
||||
/*
|
||||
Copyright (c) 2012, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
|
||||
/*===========================================================================*\
|
||||
|* Includes
|
||||
\*===========================================================================*/
|
||||
#include "algorithm.h"
|
||||
#include "stdio.h"
|
||||
#include "debug.h"
|
||||
|
||||
|
||||
/*===========================================================================*\
|
||||
|* GMRES
|
||||
\*===========================================================================*/
|
||||
/* upper_triangular_right_solve:
|
||||
* ----------------------------
|
||||
* Given upper triangular matrix R and rhs vector b, solve for
|
||||
* x. This "solve" ignores the rows, columns of R that are greater than the
|
||||
* dimensions of x.
|
||||
*/
|
||||
void upper_triangular_right_solve (const DenseMatrix &R, const Vector &b, Vector &x)
|
||||
{
|
||||
// Dimensionality check
|
||||
ASSERT(R.rows() >= b.size());
|
||||
ASSERT(R.cols() >= x.size());
|
||||
ASSERT(b.size() >= x.size());
|
||||
|
||||
int max_row = x.size() - 1;
|
||||
|
||||
// first solve step:
|
||||
x[max_row] = b[max_row] / R(max_row, max_row);
|
||||
|
||||
for (int row = max_row - 1; row >= 0; row--) {
|
||||
double xi = b[row];
|
||||
for (int col = max_row; col > row; col--)
|
||||
xi -= x[col] * R(row, col);
|
||||
x[row] = xi / R(row, row);
|
||||
}
|
||||
}
|
||||
|
||||
/* create_rotation (used in gmres):
|
||||
* -------------------------------
|
||||
* Construct a Givens rotation to zero out the lowest non-zero entry in a partially
|
||||
* factored Hessenburg matrix. Note that the previous Givens rotations should be
|
||||
* applied to this column before creating a new rotation.
|
||||
*/
|
||||
void create_rotation (const DenseMatrix &H, size_t col, Vector &Cn, Vector &Sn)
|
||||
{
|
||||
double a = H(col, col);
|
||||
double b = H(col + 1, col);
|
||||
double r;
|
||||
|
||||
if (b == 0) {
|
||||
Cn[col] = copysign(1, a);
|
||||
Sn[col] = 0;
|
||||
}
|
||||
else if (a == 0) {
|
||||
Cn[col] = 0;
|
||||
Sn[col] = copysign(1, b);
|
||||
}
|
||||
else {
|
||||
r = sqrt(a*a + b*b);
|
||||
Sn[col] = -b / r;
|
||||
Cn[col] = a / r;
|
||||
}
|
||||
}
|
||||
|
||||
/* Applies the 'col'th Givens rotation stored in vectors Sn and Cn to the 'col'th
|
||||
* column of the DenseMatrix M. (Previous columns don't need the rotation applied b/c
|
||||
* presumeably, the first col-1 columns are already upper triangular, and so their
|
||||
* entries in the col and col+1 rows are 0.)
|
||||
*/
|
||||
void apply_rotation (DenseMatrix &H, size_t col, Vector &Cn, Vector &Sn)
|
||||
{
|
||||
double c = Cn[col];
|
||||
double s = Sn[col];
|
||||
double tmp = c * H(col, col) - s * H(col+1, col);
|
||||
H(col+1, col) = s * H(col, col) + c * H(col+1, col);
|
||||
H(col, col) = tmp;
|
||||
}
|
||||
|
||||
/* Applies the 'col'th Givens rotation to the vector.
|
||||
*/
|
||||
void apply_rotation (Vector &v, size_t col, Vector &Cn, Vector &Sn)
|
||||
{
|
||||
double a = v[col];
|
||||
double b = v[col + 1];
|
||||
|
||||
double c = Cn[col];
|
||||
double s = Sn[col];
|
||||
|
||||
v[col] = c * a - s * b;
|
||||
v[col + 1] = s * a + c * b;
|
||||
}
|
||||
|
||||
/* Applies the first 'col' Givens rotations to the newly-created column
|
||||
* of H. (Leaves other columns alone.)
|
||||
*/
|
||||
void update_column (DenseMatrix &H, size_t col, Vector &Cn, Vector &Sn)
|
||||
{
|
||||
for (int i = 0; i < col; i++) {
|
||||
double c = Cn[i];
|
||||
double s = Sn[i];
|
||||
double t = c * H(i,col) - s * H(i+1,col);
|
||||
H(i+1, col) = s * H(i,col) + c * H(i+1,col);
|
||||
H(i, col) = t;
|
||||
}
|
||||
}
|
||||
|
||||
/* After a new column has been added to the hessenburg matrix, factor it back into
|
||||
* an upper-triangular matrix by:
|
||||
* - applying the previous Givens rotations to the new column
|
||||
* - computing the new Givens rotation to make the column upper triangluar
|
||||
* - applying the new Givens rotation to the column, and
|
||||
* - applying the new Givens rotation to the solution vector
|
||||
*/
|
||||
void update_qr_decomp (DenseMatrix &H, Vector &s, size_t col, Vector &Cn, Vector &Sn)
|
||||
{
|
||||
update_column( H, col, Cn, Sn);
|
||||
create_rotation(H, col, Cn, Sn);
|
||||
apply_rotation( H, col, Cn, Sn);
|
||||
apply_rotation( s, col, Cn, Sn);
|
||||
}
|
||||
|
||||
void gmres (const Matrix &A, const Vector &b, Vector &x, int num_iters, double max_err)
|
||||
{
|
||||
DEBUG_PRINT("gmres starting!\n");
|
||||
x.zero();
|
||||
|
||||
ASSERT(A.rows() == A.cols());
|
||||
DenseMatrix Qstar(num_iters + 1, A.rows());
|
||||
DenseMatrix H(num_iters + 1, num_iters);
|
||||
|
||||
// arrays for storing parameters of givens rotations
|
||||
Vector Sn(num_iters);
|
||||
Vector Cn(num_iters);
|
||||
|
||||
// array for storing the rhs projected onto the hessenburg's column space
|
||||
Vector G(num_iters+1);
|
||||
G.zero();
|
||||
|
||||
double beta = b.norm();
|
||||
G[0] = beta;
|
||||
|
||||
// temp vector, stores Aqi
|
||||
Vector w(A.rows());
|
||||
|
||||
w.copy(b);
|
||||
w.normalize();
|
||||
Qstar.set_row(0, w);
|
||||
|
||||
int iter = 0;
|
||||
Vector temp(A.rows(), false);
|
||||
double rel_err;
|
||||
|
||||
while (iter < num_iters)
|
||||
{
|
||||
// w = Aqi
|
||||
Qstar.row(iter, temp);
|
||||
A.multiply(temp, w);
|
||||
|
||||
// construct ith column of H, i+1th row of Qstar:
|
||||
for (int row = 0; row <= iter; row++) {
|
||||
Qstar.row(row, temp);
|
||||
H(row, iter) = temp.dot(w);
|
||||
w.add_ax(-H(row, iter), temp);
|
||||
}
|
||||
|
||||
H(iter+1, iter) = w.norm();
|
||||
w.divide(H(iter+1, iter));
|
||||
Qstar.set_row(iter+1, w);
|
||||
|
||||
update_qr_decomp (H, G, iter, Cn, Sn);
|
||||
|
||||
rel_err = fabs(G[iter+1] / beta);
|
||||
|
||||
if (rel_err < max_err)
|
||||
break;
|
||||
|
||||
if (iter % 100 == 0)
|
||||
DEBUG_PRINT("Iter %d: %f err\n", iter, rel_err);
|
||||
|
||||
iter++;
|
||||
}
|
||||
|
||||
if (iter == num_iters) {
|
||||
fprintf(stderr, "Error: gmres failed to converge in %d iterations (relative err: %f)\n", num_iters, rel_err);
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
// We've reached an acceptable solution (?):
|
||||
|
||||
DEBUG_PRINT("gmres completed in %d iterations (rel. resid. %f, max %f)\n", num_iters, rel_err, max_err);
|
||||
Vector y(iter+1);
|
||||
upper_triangular_right_solve(H, G, y);
|
||||
for (int i = 0; i < iter + 1; i++) {
|
||||
Qstar.row(i, temp);
|
||||
x.add_ax(y[i], temp);
|
||||
}
|
||||
}
|
||||
50
examples/gmres/algorithm.h
Normal file
50
examples/gmres/algorithm.h
Normal file
@@ -0,0 +1,50 @@
|
||||
/*
|
||||
Copyright (c) 2012, Intel Corporation
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
|
||||
#ifndef __ALGORITHM_H__
|
||||
#define __ALGORITHM_H__
|
||||
|
||||
#include "matrix.h"
|
||||
|
||||
|
||||
/* Generalized Minimal Residual Method:
|
||||
* -----------------------------------
|
||||
* Takes a square matrix and an rhs and uses GMRES to find an estimate for x.
|
||||
* The specified error is relative.
|
||||
*/
|
||||
void gmres (const Matrix &A, const Vector &b, Vector &x, int num_iters, double max_err);
|
||||
|
||||
|
||||
|
||||
#endif
|
||||
8671
examples/gmres/data/c-18/c-18.mtx
Normal file
8671
examples/gmres/data/c-18/c-18.mtx
Normal file
File diff suppressed because it is too large
Load Diff
2176
examples/gmres/data/c-18/c-18_b.mtx
Normal file
2176
examples/gmres/data/c-18/c-18_b.mtx
Normal file
File diff suppressed because it is too large
Load Diff
17847
examples/gmres/data/c-21/c-21.mtx
Normal file
17847
examples/gmres/data/c-21/c-21.mtx
Normal file
File diff suppressed because it is too large
Load Diff
3516
examples/gmres/data/c-21/c-21_b.mtx
Normal file
3516
examples/gmres/data/c-21/c-21_b.mtx
Normal file
File diff suppressed because it is too large
Load Diff
16346
examples/gmres/data/c-22/c-22.mtx
Normal file
16346
examples/gmres/data/c-22/c-22.mtx
Normal file
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user