Compare commits

291 Commits

SHA1 Message Date
011a1f3335 fix 2022-09-08 17:21:48 +02:00
9007489c51 comments 2022-09-08 16:53:43 +02:00
079b38f8de comments 2022-09-08 16:37:35 +02:00
3d1244c134 comments 2022-09-08 16:07:02 +02:00
4ce0711e67 comments 2022-09-08 15:43:16 +02:00
a0e1d123fc comments 2022-09-08 15:34:17 +02:00
6c0ec45daf cleanup 2022-09-08 13:19:57 +02:00
91a57c9cc2 cleanup 2022-09-08 13:18:30 +02:00
90edc4472e Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-09-08 12:55:44 +02:00
3f9f9f0348 cleanup 2022-09-08 12:39:06 +02:00
04c91471e0 cleanup 2022-09-08 12:23:00 +02:00
e356fe95fd cleanup 2022-09-08 12:17:05 +02:00
6e09d25657 bring back attribute search in node editor 2022-09-08 12:12:23 +02:00
a77491a6ae bring back used named attribute logging in modifier 2022-09-08 11:41:56 +02:00
a8a71d6b63 bring back used named attributes overlay 2022-09-08 11:39:36 +02:00
bf97ce6418 cleanup 2022-09-08 11:06:13 +02:00
69ab1bd73c cleanup 2022-09-08 11:01:37 +02:00
e599bb1793 cleanup 2022-09-08 10:54:44 +02:00
198f8c209f cleanup 2022-09-08 10:43:58 +02:00
2f1d60a481 improve compute context docs 2022-09-08 10:35:43 +02:00
1c37a515bc Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-09-08 09:50:40 +02:00
8429f01d8c fix attribute search 2022-09-07 17:29:44 +02:00
3aba2a3f9e Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-09-07 16:15:15 +02:00
b07660a2b9 cleanup 2022-09-07 12:53:52 +02:00
1b6d1fb1d6 move context stacks to separate header 2022-09-07 12:49:50 +02:00
f9434dbe59 use fewer allocators 2022-09-07 12:33:36 +02:00
aaf8c0d9d7 cleanup 2022-09-07 12:14:26 +02:00
ee3049e508 support logging field in viewer again 2022-09-07 12:10:08 +02:00
091e7fb735 support viewer geometry again 2022-09-07 11:44:08 +02:00
0b75698519 actually log data in viewer node 2022-09-07 11:21:35 +02:00
cdd045d269 cleanup logging 2022-09-07 11:06:24 +02:00
679a4d218b Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-09-07 10:55:06 +02:00
35caf245dd progress 2022-09-07 10:21:32 +02:00
45d36410a7 Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-09-07 09:12:15 +02:00
7b71ab3fe0 progress 2022-09-06 20:06:44 +02:00
d98c7a195f add lazy function for viewer node 2022-09-06 19:45:17 +02:00
a347508dcc simplify naming 2022-09-06 19:28:13 +02:00
cc08debc0c make nodes with side effects work 2022-09-06 19:24:13 +02:00
4e9515d23d gather side effect nodes 2022-09-06 18:18:14 +02:00
e7c95b8fde cleanup + add initial side effect provider 2022-09-06 18:12:59 +02:00
c36d8516b0 cleanup 2022-09-06 17:46:13 +02:00
2b9e3e4e5a Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-09-06 17:37:20 +02:00
46f0b0798a make logging optional 2022-09-04 12:49:07 +02:00
3548c5acfe cleanup 2022-09-04 12:40:00 +02:00
68c797c49e gather sockets to preview 2022-09-04 12:39:36 +02:00
dd5c704ee1 add utility context stack builder 2022-09-04 12:07:54 +02:00
9e2ff5b11a show more detailed socket inspection 2022-09-04 11:58:11 +02:00
a99fddf98e log more information of values 2022-09-04 11:44:18 +02:00
d73e2d5612 log less unnecessary data 2022-09-04 11:13:59 +02:00
d71d0e402f add logger to lazy function graph executor 2022-09-02 17:50:30 +02:00
8629bd863a log converted values 2022-09-02 15:44:59 +02:00
686d90a824 try find socket with logged value 2022-09-02 15:26:54 +02:00
f197490901 Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-09-02 14:46:53 +02:00
d38cc2fc53 Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-09-01 20:38:20 +02:00
80d6565b93 fix missing lazy function update with animation 2022-09-01 19:58:49 +02:00
75e658e6a0 Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-09-01 19:25:54 +02:00
4cfa580a68 fix multi input socket with muted link 2022-09-01 19:23:33 +02:00
db13aa8e43 refactor lazy function graph generation 2022-09-01 18:56:19 +02:00
d6639cfd00 cleanup naming 2022-09-01 12:57:12 +02:00
6ee52e2e8c Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-09-01 12:50:57 +02:00
a2a8b9e82f fixes 2022-08-31 15:58:20 +02:00
ad55174715 Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-08-31 15:45:58 +02:00
7d3f3c6fd7 fixes 2022-08-31 13:02:54 +02:00
280c039908 Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-08-31 12:34:03 +02:00
8ecd241502 improve text 2022-08-24 13:48:43 +02:00
4f1de2be9d cleanup 2022-08-24 13:33:52 +02:00
9f4db143f3 Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-08-24 13:17:38 +02:00
231cafa911 socket value logging progress 2022-08-23 13:39:23 +02:00
5523d90d9c progress 2022-08-23 13:06:18 +02:00
4d94c1a4e7 progress 2022-08-23 12:55:11 +02:00
8e2b11badf fixes 2022-08-23 12:26:54 +02:00
5dbbfc8cc3 Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-08-23 12:26:50 +02:00
cb091658a9 improve naming 2022-07-18 23:13:28 +02:00
f3a63be474 improve naming 2022-07-18 22:57:09 +02:00
98b5005787 fix 2022-07-18 22:33:07 +02:00
bf808496f8 fix 2022-07-18 22:22:16 +02:00
050a9f5f0d Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-07-18 22:20:30 +02:00
f5652ce6b2 show node timings 2022-06-30 23:31:15 +02:00
58bd9230a8 initial node timings support 2022-06-30 23:19:47 +02:00
8f710b104d cleanup logger naming 2022-06-30 22:27:09 +02:00
69aa085cd8 Merge branch 'master' into temp-geometry-nodes-evaluator-refactor 2022-06-30 22:16:27 +02:00
dbef967a7b progress 2022-06-26 14:56:56 +02:00
c2e101facb progress 2022-06-26 14:30:58 +02:00
f4164f27ab progress 2022-06-26 14:28:18 +02:00
ff11ece7a1 progress 2022-06-26 14:26:32 +02:00
d9fc777ce2 show warnings 2022-06-26 14:17:29 +02:00
a74097a150 cleanup 2022-06-26 14:07:06 +02:00
30ba454cbe cleanup 2022-06-26 13:58:39 +02:00
7adefbc04e progress 2022-06-26 13:44:57 +02:00
2c69f123ca cleanup 2022-06-26 12:46:02 +02:00
aaf8b97666 fixes 2022-06-26 12:44:18 +02:00
b6b2e91132 cleanup 2022-06-26 12:22:43 +02:00
0f9bfe501b cleanup 2022-06-26 12:08:57 +02:00
668f34d346 reduce redundant work 2022-06-26 11:58:32 +02:00
b570994b0e show node warnings again 2022-06-26 11:37:21 +02:00
cec0db55df cleanup 2022-06-26 10:29:24 +02:00
4c16d01222 Merge branch 'master' into lazy-function 2022-06-26 10:23:55 +02:00
460c970cab fix warning 2022-06-10 17:42:39 +02:00
14163d9ce0 fix 2022-06-10 17:24:06 +02:00
d4110b2728 Merge branch 'master' into lazy-function 2022-06-10 17:15:45 +02:00
7a5eee9710 Merge branch 'master' into lazy-function 2022-06-10 09:56:48 +02:00
9b598f0ab2 fix 2022-06-09 15:38:38 +02:00
1287fbe88b fix 2022-06-09 14:02:43 +02:00
464428b002 Merge branch 'master' into lazy-function 2022-06-09 14:02:26 +02:00
090555898c initial value logging 2022-06-01 11:02:04 +02:00
f1cd0ed6b8 remove old geometry nodes logger 2022-06-01 10:46:18 +02:00
aa0b2990f6 progress 2022-06-01 10:22:11 +02:00
9c23df35e4 progress 2022-06-01 10:09:22 +02:00
ad52352e6f Merge branch 'master' into lazy-function 2022-05-31 20:56:35 +02:00
67f46a8c05 pass context stack 2022-05-30 18:16:26 +02:00
b29e17bc89 fix 2022-05-30 17:59:01 +02:00
b10e74d85e cleanup 2022-05-30 17:47:21 +02:00
cef9625a2e split params from context 2022-05-30 17:25:03 +02:00
0c6d11c1be simplify naming 2022-05-30 16:50:32 +02:00
4f7ca91df1 simplify naming 2022-05-30 16:41:40 +02:00
45efd45ee0 simplify naming 2022-05-30 16:41:07 +02:00
48d97f9abb cleanup 2022-05-30 16:40:05 +02:00
0b1c529be3 simplify 2022-05-30 16:36:35 +02:00
0ff96301e9 Merge branch 'master' into lazy-function 2022-05-30 16:28:37 +02:00
ba163fcc30 progress 2022-05-30 16:26:59 +02:00
f3a412290c remove xxhash for now 2022-05-30 15:57:03 +02:00
04e2bf544a Merge branch 'master' into lazy-function 2022-05-30 15:34:26 +02:00
1f10276019 Merge branch 'master' into lazy-function 2022-05-30 12:16:49 +02:00
f54ba94974 progress 2022-05-29 22:23:22 +02:00
cffc2dcae9 progress 2022-05-29 13:43:51 +02:00
48e709fd84 progress 2022-05-29 13:24:02 +02:00
e8693f14a6 progress 2022-05-29 13:13:43 +02:00
d73b216e6a fix 2022-05-29 12:16:01 +02:00
a2ad563cba initial context stack 2022-05-29 12:15:26 +02:00
f5454f4a5f Merge branch 'master' into lazy-function 2022-05-28 22:10:21 +02:00
3a6d61b019 Merge branch 'master' into lazy-function 2022-05-26 13:12:37 +02:00
8b97f6414d support inlining node groups 2022-05-26 11:58:30 +02:00
28af4afa3d separate mapping from resourcse 2022-05-26 10:44:36 +02:00
73f68712d1 disable print 2022-05-26 10:31:42 +02:00
cafd20f998 store output attributes again 2022-05-26 10:26:38 +02:00
7a89bd9ad5 cleanup 2022-05-26 10:14:40 +02:00
c83b2e04ac fix 2022-05-25 20:30:30 +02:00
1468dcb614 initial support for implicit inputs 2022-05-25 19:23:23 +02:00
781454b645 support muted links 2022-05-25 18:58:24 +02:00
9181b14dc7 Merge branch 'master' into lazy-function 2022-05-25 18:54:41 +02:00
f3bb76b9c1 fixes 2022-05-22 14:05:12 +02:00
1ccb353295 fix 2022-05-22 13:34:49 +02:00
2c0d794dc4 fix 2022-05-22 12:51:27 +02:00
955f78d289 fixes 2022-05-22 12:31:28 +02:00
aa4bdf42dd fixes 2022-05-22 12:14:30 +02:00
d1ad588611 fix 2022-05-22 12:06:27 +02:00
a3f9d535d0 Merge branch 'master' into lazy-function 2022-05-22 12:02:06 +02:00
a51c9a3126 initial support for muted nodes 2022-05-22 11:56:15 +02:00
0bb0a6e7b6 Merge branch 'master' into lazy-function 2022-05-22 10:17:59 +02:00
3dabd0f705 better dot output 2022-05-21 22:22:25 +02:00
9f12236be1 cleanup 2022-05-21 22:05:11 +02:00
470f63db41 cleanup 2022-05-21 22:00:36 +02:00
2f3119a06b cleanup 2022-05-21 21:58:58 +02:00
3058283f71 cleanup 2022-05-21 21:55:38 +02:00
23abe292bd cleanup 2022-05-21 21:54:44 +02:00
20d0c51dd7 only allow dummy sockets as graph inputs and outputs 2022-05-21 21:53:14 +02:00
ed2e265f37 cleanup 2022-05-21 21:20:23 +02:00
88d035ddd3 cleanup 2022-05-21 21:06:11 +02:00
e8520c9949 progress 2022-05-21 21:00:03 +02:00
5209621e41 progress 2022-05-21 20:52:48 +02:00
103b1209b5 comments 2022-05-21 20:38:05 +02:00
c1888b372b fix 2022-05-21 15:29:12 +02:00
9d37ae1679 pass inputs from modifier 2022-05-21 15:20:19 +02:00
c7076c5185 fixes 2022-05-21 14:59:33 +02:00
93b2206945 fixes 2022-05-21 14:52:14 +02:00
978f69fb2f initial node group execution 2022-05-21 14:15:37 +02:00
10f691efb7 support lazy geometry nodes again 2022-05-21 13:55:37 +02:00
ee0589d8e2 setup default values 2022-05-21 13:49:03 +02:00
5df06ea72f progress 2022-05-21 13:21:24 +02:00
2c8af68857 create dummy socket map 2022-05-21 12:54:45 +02:00
604e74dc8b initial group node support 2022-05-21 12:46:35 +02:00
b673fe34fa progress 2022-05-21 12:30:19 +02:00
c1b5ced6ba support reroute 2022-05-21 12:22:35 +02:00
f758100275 support group input and output 2022-05-21 12:14:58 +02:00
52951fefeb progress 2022-05-21 11:53:53 +02:00
e303aad37c refactor getting multi functions for nodes 2022-05-21 11:26:13 +02:00
c15a768a9a progress 2022-05-21 11:23:59 +02:00
25bf08b337 add dummy node support 2022-05-21 11:08:01 +02:00
51e026786a implement actual type conversion 2022-05-21 10:42:40 +02:00
73d19fe9f7 update geo node params 2022-05-21 09:06:55 +02:00
649bdd77e0 remove old evaluator 2022-05-21 08:36:24 +02:00
dc75ea3c3f cleanup 2022-05-20 21:45:50 +02:00
73bb083747 Merge branch 'master' into lazy-function 2022-05-20 21:33:38 +02:00
09e37da0db remove dnode from params provider 2022-05-19 17:08:34 +02:00
a5f79ecf59 remove logger from params 2022-05-19 17:04:26 +02:00
c4c23b6f8a define geo nodes user data 2022-05-19 17:03:01 +02:00
1ce9c68e54 support passing user data through lazy functions 2022-05-19 17:00:02 +02:00
274c98fe50 cleanup 2022-05-19 16:51:06 +02:00
a27245f093 ignore frame nodes 2022-05-19 16:22:58 +02:00
8277bf8c5d insert conversion nodes 2022-05-19 16:19:03 +02:00
6c291c79ad handle multi-input 2022-05-19 15:48:08 +02:00
d0473dea53 Merge branch 'master' into lazy-function 2022-05-19 15:13:26 +02:00
e65f31a8c4 initial function generation from node tree 2022-05-19 11:08:27 +02:00
b9fc7939f8 make node chains 2022-05-19 09:35:35 +02:00
c2ec3f2a44 avoid using task pool when there is no parallel work 2022-05-19 09:16:37 +02:00
1cb24cfe17 use node indices instead of map 2022-05-19 08:56:32 +02:00
97b5a838fa improve naming 2022-05-18 22:39:59 +02:00
6c27edbc26 more cleanup 2022-05-18 22:36:25 +02:00
5b921ece53 remove sgraph 2022-05-18 22:34:07 +02:00
974e334b16 bypass task pool in many cases 2022-05-18 22:19:04 +02:00
63f885e00d pass along current task 2022-05-18 22:07:36 +02:00
d5b3e5fca9 progress 2022-05-18 21:55:14 +02:00
b9dfdb7400 progress 2022-05-18 21:29:32 +02:00
28edae5bac progress 2022-05-18 21:13:33 +02:00
2a8ccf463a fix 2022-05-18 20:16:12 +02:00
9f1d871138 progress 2022-05-18 20:11:46 +02:00
a5a7b19128 cleanup 2022-05-18 19:48:00 +02:00
95d26d80ae progress 2022-05-18 19:41:34 +02:00
32e30a4263 progress 2022-05-18 19:30:16 +02:00
3ee490b4de progress 2022-05-18 19:27:12 +02:00
29587d2cb0 progrss 2022-05-18 19:21:37 +02:00
fd919e18ae Merge branch 'master' into sgraph 2022-05-18 19:19:30 +02:00
d06a4cb4a9 progress 2022-05-18 12:05:43 +02:00
62ece09597 progress 2022-05-18 09:49:43 +02:00
80a02e54b8 progress 2022-05-18 09:42:26 +02:00
f02fdf0ff1 progress 2022-05-18 09:40:02 +02:00
457f9aa832 progress 2022-05-17 21:18:24 +02:00
c445bd7486 progress 2022-05-17 21:12:16 +02:00
48dc18ff2c progress 2022-05-17 20:54:05 +02:00
d6ebcc7619 progress 2022-05-17 20:52:46 +02:00
9896169f9a progress 2022-05-17 20:50:24 +02:00
964fb14ba4 progress 2022-05-17 20:11:00 +02:00
c8caff0216 progress 2022-05-17 20:03:42 +02:00
ccec45a2db progress 2022-05-17 19:47:08 +02:00
8b6ece9fd2 cleanup 2022-05-17 19:38:49 +02:00
8b822977b8 initial eager evaluation 2022-05-17 19:30:56 +02:00
c83224de85 progress 2022-05-17 15:18:20 +02:00
679ef9a083 progress 2022-05-17 15:01:02 +02:00
5797238694 progress 2022-05-17 14:33:01 +02:00
c8ea1f1b4b progress 2022-05-17 14:22:12 +02:00
20396034c1 progress 2022-05-17 14:19:37 +02:00
f43f912431 progress 2022-05-17 14:07:36 +02:00
a6d33079f3 progress 2022-05-17 13:53:43 +02:00
77b1cf9b68 initial lazy function 2022-05-17 13:23:21 +02:00
3356a58507 Merge branch 'master' into sgraph 2022-05-17 12:12:12 +02:00
80a590420c progress 2022-05-14 11:38:33 +02:00
dcf1c2ef9a cleanup 2022-05-14 09:02:54 +02:00
0fdd7e59df Merge branch 'master' into sgraph 2022-05-14 08:52:35 +02:00
0eef8f3747 progress 2022-05-13 17:01:22 +02:00
645d2643f9 progress 2022-05-13 16:56:00 +02:00
11a8dc2ef6 remove code that won't be used 2022-05-13 15:13:02 +02:00
1443cef855 Merge branch 'master' into sgraph 2022-05-13 15:04:48 +02:00
0d0d7c9b4c fix 2022-03-22 11:53:09 +01:00
002d849be2 progress 2022-03-22 11:48:07 +01:00
528ba923bf fix 2022-03-22 11:34:33 +01:00
3595abb204 Merge branch 'master' into sgraph 2022-03-22 11:33:03 +01:00
be856674e9 progress 2022-02-13 18:17:44 +01:00
20acbad7c3 progress 2022-02-13 17:54:59 +01:00
d2b0ae4f9d progress 2022-02-13 17:39:43 +01:00
7cd63301d3 progress 2022-02-13 13:15:53 +01:00
08016656a3 progress 2022-02-13 13:09:43 +01:00
2c2d3b7bdc progress 2022-02-13 12:57:47 +01:00
395df6766f progress 2022-02-13 12:51:22 +01:00
1690446c5e cleanup 2022-02-13 12:41:59 +01:00
a14fc38933 progress 2022-02-13 12:27:42 +01:00
fb443dd0bc cleanup 2022-02-13 12:06:08 +01:00
6058a1ffa7 progress 2022-02-13 11:58:13 +01:00
b031f1ae56 progress 2022-02-13 11:52:18 +01:00
d3c8885793 progress 2022-02-13 11:41:31 +01:00
9a65a061c5 progress 2022-02-13 11:32:14 +01:00
42f2c6a0bb progress 2022-02-13 11:18:08 +01:00
db425bd997 progress 2022-02-13 11:06:20 +01:00
735b7e171e progress 2022-02-13 11:03:56 +01:00
7e871183b7 progress 2022-02-12 14:51:20 +01:00
e2b4b17d2e fixes 2022-02-12 13:37:34 +01:00
642a461491 progress 2022-02-12 13:15:38 +01:00
cf8e500a64 progress 2022-02-12 12:41:29 +01:00
ccd1d65fe8 simplify templating 2022-02-12 12:30:27 +01:00
65917a1461 progress 2022-02-12 12:09:00 +01:00
5388195757 progress 2022-02-12 11:46:32 +01:00
2257a71154 Merge branch 'master' into sgraph 2022-02-12 11:25:20 +01:00
df4e9a0f29 progress 2022-02-07 17:25:54 +01:00
54e622c3b0 cleanup 2022-02-07 17:24:16 +01:00
754ed19656 progress 2022-02-07 17:23:23 +01:00
588d14a66d progress 2022-02-07 16:42:29 +01:00
7c159fef17 progress 2022-02-07 16:35:35 +01:00
8f7effb585 Merge branch 'master' into sgraph 2022-02-07 15:23:29 +01:00
1e29c82a5c progress 2022-02-07 13:54:43 +01:00
63e16a6ee2 comment 2022-02-06 14:19:51 +01:00
244c7f6ad2 check sgraph validity 2022-02-06 14:11:07 +01:00
9f1751914d add adapter for derived tree 2022-02-06 13:19:55 +01:00
1147c0f40d cleanup 2022-02-06 13:03:41 +01:00
96b2216cfc cleanup naming 2022-02-06 12:53:26 +01:00
c1c46bc60c enable test 2022-02-06 12:49:59 +01:00
2807625ee5 cleanup 2022-02-06 12:47:13 +01:00
04592a81b0 refactor sgraph 2022-02-06 12:46:09 +01:00
afb6cea2cf more cleanup 2022-02-06 11:46:50 +01:00
0f1302d281 use "sgraph" name 2022-02-06 11:40:52 +01:00
db85b09c70 logical tree ref adapter 2022-02-04 15:19:20 +01:00
12a037c0d1 tree ref adapter 2022-02-04 15:03:22 +01:00
9a6259264f initial node graph 2022-02-04 14:43:59 +01:00
2154 changed files with 34187 additions and 55779 deletions

View File

@@ -273,5 +273,5 @@ StatementMacros:
- PyObject_VAR_HEAD
- ccl_gpu_kernel_postfix
MacroBlockBegin: "^OSL_CLOSURE_STRUCT_BEGIN$"
MacroBlockEnd: "^OSL_CLOSURE_STRUCT_END$"
MacroBlockBegin: "^BSDF_CLOSURE_CLASS_BEGIN$"
MacroBlockEnd: "^BSDF_CLOSURE_CLASS_END$"

View File

@@ -1,12 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright 2006 Blender Foundation. All rights reserved.
# -----------------------------------------------------------------------------
# Early Initialization
# NOTE: We don't allow in-source builds. This causes no end of troubles because
#-----------------------------------------------------------------------------
# We don't allow in-source builds. This causes no end of troubles because
# all out-of-source builds will use the CMakeCache.txt file there and even
# build the libs and objects in it.
if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
if(NOT DEFINED WITH_IN_SOURCE_BUILD)
message(FATAL_ERROR
@@ -36,7 +35,7 @@ endif()
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/build_files/cmake/Modules")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/build_files/cmake/platform")
# Avoid having an empty `CMAKE_BUILD_TYPE`.
# avoid having empty buildtype
if(NOT DEFINED CMAKE_BUILD_TYPE_INIT)
set(CMAKE_BUILD_TYPE_INIT "Release")
# Internal logic caches this variable, avoid showing it by default
@@ -60,8 +59,7 @@ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS
$<$<CONFIG:RelWithDebInfo>:NDEBUG>
)
# -----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Set policy
# see "cmake --help-policy CMP0003"
@@ -91,16 +89,13 @@ endif()
if(POLICY CMP0087)
cmake_policy(SET CMP0087 NEW)
endif()
# -----------------------------------------------------------------------------
# Load Blender's Local Macros
#-----------------------------------------------------------------------------
# Load some macros.
include(build_files/cmake/macros.cmake)
# -----------------------------------------------------------------------------
# Initialize Project
#-----------------------------------------------------------------------------
# Initialize project.
blender_project_hack_pre()
@@ -110,34 +105,14 @@ blender_project_hack_post()
enable_testing()
# -----------------------------------------------------------------------------
# Test Compiler Support
#
# Keep in sync with: https://wiki.blender.org/wiki/Building_Blender
if(CMAKE_COMPILER_IS_GNUCC)
if("${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "9.3.1")
message(FATAL_ERROR "The minimum supported version of GCC is 9.3.1")
endif()
elseif(CMAKE_C_COMPILER_ID MATCHES "Clang")
if(CMAKE_COMPILER_IS_GNUCC AND ("${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "8.0"))
message(FATAL_ERROR "The minimum supported version of CLANG is 8.0")
endif()
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
if(MSVC_VERSION VERSION_LESS "1928")
message(FATAL_ERROR "The minimum supported version of MSVC is 2019 (16.9.16)")
endif()
endif()
# -----------------------------------------------------------------------------
# Test Compiler/Library Features
#-----------------------------------------------------------------------------
# Test compiler/library features.
include(build_files/cmake/have_features.cmake)
# -----------------------------------------------------------------------------
# Redirect Output Files
#-----------------------------------------------------------------------------
# Redirect output files
set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/bin CACHE INTERNAL "" FORCE)
set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}/lib CACHE INTERNAL "" FORCE)
@@ -149,15 +124,14 @@ else()
set(TESTS_OUTPUT_DIR ${EXECUTABLE_OUTPUT_PATH}/tests/ CACHE INTERNAL "" FORCE)
endif()
# -----------------------------------------------------------------------------
# Set Default Configuration Options
#-----------------------------------------------------------------------------
# Set default config options
get_blender_version()
# -----------------------------------------------------------------------------
# Declare Options
#-----------------------------------------------------------------------------
# Options
# Blender internal features
option(WITH_BLENDER "Build blender (disable to build only the blender player)" ON)
@@ -183,6 +157,9 @@ mark_as_advanced(WITH_PYTHON_SECURITY) # some distributions see this as a secur
option(WITH_PYTHON_SAFETY "Enable internal API error checking to track invalid data to prevent crash on access (at the expense of some efficiency, only enable for development)." OFF)
mark_as_advanced(WITH_PYTHON_SAFETY)
option(WITH_PYTHON_MODULE "Enable building as a python module which runs without a user interface, like running regular blender in background mode (experimental, only enable for development), installs to PYTHON_SITE_PACKAGES (or CMAKE_INSTALL_PREFIX if WITH_INSTALL_PORTABLE is enabled)." OFF)
if(APPLE)
option(WITH_PYTHON_FRAMEWORK "Enable building using the Python available in the framework (OSX only)" OFF)
endif()
option(WITH_BUILDINFO "Include extra build details (only disable for development & faster builds)" ON)
set(BUILDINFO_OVERRIDE_DATE "" CACHE STRING "Use instead of the current date for reproducible builds (empty string disables this option)")
@@ -429,7 +406,6 @@ mark_as_advanced(WITH_CPU_SIMD)
# Cycles
option(WITH_CYCLES "Enable Cycles Render Engine" ON)
option(WITH_CYCLES_OSL "Build Cycles with OpenShadingLanguage support" ON)
option(WITH_CYCLES_PATH_GUIDING "Build Cycles with path guiding support" ON)
option(WITH_CYCLES_EMBREE "Build Cycles with Embree support" ON)
option(WITH_CYCLES_LOGGING "Build Cycles with logging support" ON)
option(WITH_CYCLES_DEBUG "Build Cycles with options useful for debugging (e.g., MIS)" OFF)
@@ -648,8 +624,8 @@ if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
unset(_asan_defaults)
if(MSVC)
find_library(
COMPILER_ASAN_LIBRARY NAMES clang_rt.asan-x86_64
find_library(
COMPILER_ASAN_LIBRARY NAMES clang_rt.asan-x86_64
PATHS
[HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\LLVM\\LLVM;]/lib/clang/7.0.0/lib/windows
[HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\LLVM\\LLVM;]/lib/clang/6.0.0/lib/windows
@@ -777,8 +753,8 @@ if(APPLE)
endif()
# -----------------------------------------------------------------------------
# Check for Conflicting/Unsupported Configurations
#-----------------------------------------------------------------------------
# Check for conflicting/unsupported configurations
if(NOT WITH_BLENDER AND NOT WITH_CYCLES_STANDALONE AND NOT WITH_CYCLES_HYDRA_RENDER_DELEGATE)
message(FATAL_ERROR
@@ -828,7 +804,7 @@ endif()
set_and_warn_dependency(WITH_PUGIXML WITH_OPENIMAGEIO OFF)
if(WITH_BOOST AND NOT (WITH_CYCLES OR WITH_OPENIMAGEIO OR WITH_INTERNATIONAL OR
WITH_OPENVDB OR WITH_OPENCOLORIO OR WITH_USD OR WITH_ALEMBIC))
WITH_OPENVDB OR WITH_OPENCOLORIO OR WITH_USD OR WITH_ALEMBIC))
message(STATUS "No dependencies need 'WITH_BOOST' forcing WITH_BOOST=OFF")
set(WITH_BOOST OFF)
endif()
@@ -912,11 +888,7 @@ endif()
if(WITH_CYCLES AND WITH_CYCLES_DEVICE_CUDA AND NOT WITH_CUDA_DYNLOAD)
find_package(CUDA)
if(NOT CUDA_FOUND)
message(
STATUS
"CUDA toolkit not found, "
"using dynamic runtime loading of libraries (WITH_CUDA_DYNLOAD) instead"
)
message(STATUS "CUDA toolkit not found, using dynamic runtime loading of libraries (WITH_CUDA_DYNLOAD) instead")
set(WITH_CUDA_DYNLOAD ON)
endif()
endif()
@@ -926,16 +898,14 @@ if(WITH_CYCLES_DEVICE_HIP)
set(WITH_HIP_DYNLOAD ON)
endif()
# -----------------------------------------------------------------------------
# Check if Sub-modules are Cloned
#-----------------------------------------------------------------------------
# Check if submodules are cloned.
if(WITH_INTERNATIONAL)
file(GLOB RESULT "${CMAKE_SOURCE_DIR}/release/datafiles/locale")
list(LENGTH RESULT DIR_LEN)
if(DIR_LEN EQUAL 0)
message(
WARNING
message(WARNING
"Translation path '${CMAKE_SOURCE_DIR}/release/datafiles/locale' is missing, "
"This is a 'git submodule', which are known not to work with bridges to other version "
"control systems, disabling 'WITH_INTERNATIONAL'."
@@ -953,17 +923,13 @@ if(WITH_PYTHON)
# because UNIX will search for the old Python paths which may not exist.
# giving errors about missing paths before this case is met.
if(DEFINED PYTHON_VERSION AND "${PYTHON_VERSION}" VERSION_LESS "3.10")
message(
FATAL_ERROR
"At least Python 3.10 is required to build, but found Python ${PYTHON_VERSION}"
)
message(FATAL_ERROR "At least Python 3.10 is required to build, but found Python ${PYTHON_VERSION}")
endif()
file(GLOB RESULT "${CMAKE_SOURCE_DIR}/release/scripts/addons")
list(LENGTH RESULT DIR_LEN)
if(DIR_LEN EQUAL 0)
message(
WARNING
message(WARNING
"Addons path '${CMAKE_SOURCE_DIR}/release/scripts/addons' is missing, "
"This is a 'git submodule', which are known not to work with bridges to other version "
"control systems: * CONTINUING WITHOUT ADDONS *"
@@ -971,9 +937,8 @@ if(WITH_PYTHON)
endif()
endif()
# -----------------------------------------------------------------------------
# InitialIze Un-cached Vars, Avoid Unused Warning
#-----------------------------------------------------------------------------
# Initialize un-cached vars, avoid unused warning
# linux only, not cached
set(WITH_BINRELOC OFF)
@@ -1042,13 +1007,12 @@ if(WITH_CPU_SIMD)
set(COMPILER_SSE2_FLAG)
# Test Neon first since macOS Arm can compile and run x86-64 SSE binaries.
test_neon_support()
TEST_NEON_SUPPORT()
if(NOT SUPPORT_NEON_BUILD)
test_sse_support(COMPILER_SSE_FLAG COMPILER_SSE2_FLAG)
TEST_SSE_SUPPORT(COMPILER_SSE_FLAG COMPILER_SSE2_FLAG)
endif()
endif()
# ----------------------------------------------------------------------------
# Main Platform Checks
#
@@ -1064,9 +1028,8 @@ elseif(APPLE)
include(platform_apple)
endif()
# -----------------------------------------------------------------------------
# Common Checks for Compatible Options
#-----------------------------------------------------------------------------
# Common.
if(NOT WITH_FFTW3 AND WITH_MOD_OCEANSIM)
message(FATAL_ERROR "WITH_MOD_OCEANSIM requires WITH_FFTW3 to be ON")
@@ -1074,15 +1037,13 @@ endif()
if(WITH_CYCLES)
if(NOT WITH_OPENIMAGEIO)
message(
FATAL_ERROR
message(FATAL_ERROR
"Cycles requires WITH_OPENIMAGEIO, the library may not have been found. "
"Configure OIIO or disable WITH_CYCLES"
)
endif()
if(NOT WITH_BOOST)
message(
FATAL_ERROR
message(FATAL_ERROR
"Cycles requires WITH_BOOST, the library may not have been found. "
"Configure BOOST or disable WITH_CYCLES"
)
@@ -1090,8 +1051,7 @@ if(WITH_CYCLES)
if(WITH_CYCLES_OSL)
if(NOT WITH_LLVM)
message(
FATAL_ERROR
message(FATAL_ERROR
"Cycles OSL requires WITH_LLVM, the library may not have been found. "
"Configure LLVM or disable WITH_CYCLES_OSL"
)
@@ -1101,15 +1061,14 @@ endif()
if(WITH_INTERNATIONAL)
if(NOT WITH_BOOST)
message(
FATAL_ERROR
message(FATAL_ERROR
"Internationalization requires WITH_BOOST, the library may not have been found. "
"Configure BOOST or disable WITH_INTERNATIONAL"
)
endif()
endif()
# Enable SIMD support if detected by `test_sse_support()` or `test_neon_support()`.
# Enable SIMD support if detected by TEST_SSE_SUPPORT() or TEST_NEON_SUPPORT().
#
# This is done globally, so that all modules can use it if available, and
# because these are used in headers used by many modules.
@@ -1117,7 +1076,7 @@ if(WITH_CPU_SIMD)
if(SUPPORT_NEON_BUILD)
# Neon
if(SSE2NEON_FOUND)
include_directories(SYSTEM "${SSE2NEON_INCLUDE_DIRS}")
blender_include_dirs_sys("${SSE2NEON_INCLUDE_DIRS}")
add_definitions(-DWITH_SSE2NEON)
endif()
else()
@@ -1216,18 +1175,15 @@ if(WITH_OPENVDB)
list(APPEND OPENVDB_LIBRARIES ${BOOST_LIBRARIES} ${TBB_LIBRARIES})
endif()
# -----------------------------------------------------------------------------
# Configure OpenGL
#-----------------------------------------------------------------------------
# Configure OpenGL.
if(WITH_OPENGL)
add_definitions(-DWITH_OPENGL)
endif()
# -----------------------------------------------------------------------------
# Configure Metal
#-----------------------------------------------------------------------------
# Configure Metal.
if(WITH_METAL_BACKEND)
add_definitions(-DWITH_METAL_BACKEND)
@@ -1236,10 +1192,8 @@ if(WITH_METAL_BACKEND)
# build_files/cmake/platform/platform_apple.cmake
endif()
# -----------------------------------------------------------------------------
# Configure OpenMP
#-----------------------------------------------------------------------------
# Configure OpenMP.
if(WITH_OPENMP)
if(NOT OPENMP_CUSTOM)
find_package(OpenMP)
@@ -1271,8 +1225,7 @@ if(WITH_OPENMP)
)
endif()
# -----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Configure Bullet
if(WITH_BULLET AND WITH_SYSTEM_BULLET)
@@ -1286,21 +1239,15 @@ else()
# set(BULLET_LIBRARIES "")
endif()
# -----------------------------------------------------------------------------
# Configure Python
#-----------------------------------------------------------------------------
# Configure Python.
if(WITH_PYTHON_MODULE)
# Not currently supported due to different required Python link flags.
if(WITH_GTESTS)
message(STATUS "GTests not compatible with Python module, disabling WITH_GTESTS")
set(WITH_GTESTS OFF)
endif()
add_definitions(-DPy_ENABLE_SHARED)
endif()
# -----------------------------------------------------------------------------
# Configure `GLog/GFlags`
#-----------------------------------------------------------------------------
# Configure GLog/GFlags
if(WITH_LIBMV OR WITH_GTESTS OR (WITH_CYCLES AND WITH_CYCLES_LOGGING))
if(WITH_SYSTEM_GFLAGS)
@@ -1308,7 +1255,7 @@ if(WITH_LIBMV OR WITH_GTESTS OR (WITH_CYCLES AND WITH_CYCLES_LOGGING))
if(NOT GFLAGS_FOUND)
message(FATAL_ERROR "System wide Gflags is requested but was not found")
endif()
# `FindGflags` does not define this, and we are not even sure what to use here.
# FindGflags does not define this, and we are not even sure what to use here.
set(GFLAGS_DEFINES)
else()
set(GFLAGS_DEFINES
@@ -1326,7 +1273,7 @@ if(WITH_LIBMV OR WITH_GTESTS OR (WITH_CYCLES AND WITH_CYCLES_LOGGING))
if(NOT GLOG_FOUND)
message(FATAL_ERROR "System wide Glog is requested but was not found")
endif()
# `FindGlog` does not define this, and we are not even sure what to use here.
# FindGlog does not define this, and we are not even sure what to use here.
set(GLOG_DEFINES)
else()
set(GLOG_DEFINES
@@ -1341,13 +1288,9 @@ if(WITH_LIBMV OR WITH_GTESTS OR (WITH_CYCLES AND WITH_CYCLES_LOGGING))
endif()
endif()
# -----------------------------------------------------------------------------
# Ninja Job Limiting
#-----------------------------------------------------------------------------
# Extra limits to number of jobs running in parallel for some kind os tasks.
# Only supported by Ninja build system currently.
if("${CMAKE_GENERATOR}" MATCHES "Ninja" AND WITH_NINJA_POOL_JOBS)
if(NOT NINJA_MAX_NUM_PARALLEL_COMPILE_JOBS AND
NOT NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS AND
@@ -1359,8 +1302,7 @@ if("${CMAKE_GENERATOR}" MATCHES "Ninja" AND WITH_NINJA_POOL_JOBS)
# Note: this gives mem in MB.
cmake_host_system_information(RESULT _TOT_MEM QUERY TOTAL_PHYSICAL_MEMORY)
# Heuristics: the more cores we have, the more free memory we have to keep
# for the non-heavy tasks too.
# Heuristics... the more cores we have, the more free mem we have to keep for the non-heavy tasks too.
if(${_TOT_MEM} LESS 8000 AND ${_NUM_CORES} GREATER 2)
set(_compile_heavy_jobs "1")
elseif(${_TOT_MEM} LESS 16000 AND ${_NUM_CORES} GREATER 4)
@@ -1380,8 +1322,7 @@ if("${CMAKE_GENERATOR}" MATCHES "Ninja" AND WITH_NINJA_POOL_JOBS)
mark_as_advanced(NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS)
set(_compile_heavy_jobs)
# Only set regular compile jobs if we set heavy jobs,
# otherwise default (using all cores) if fine.
# Only set regular compile jobs if we set heavy jobs, otherwise default (using all cores) if fine.
if(NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS)
math(EXPR _compile_jobs "${_NUM_CORES} - 1")
else()
@@ -1392,8 +1333,8 @@ if("${CMAKE_GENERATOR}" MATCHES "Ninja" AND WITH_NINJA_POOL_JOBS)
mark_as_advanced(NINJA_MAX_NUM_PARALLEL_COMPILE_JOBS)
set(_compile_jobs)
# In practice, even when there is RAM available,
# this proves to be quicker than running in parallel (due to slow disks accesses).
# In practice, even when there is RAM available, this proves to be quicker than running in parallel
# (due to slow disks accesses).
set(NINJA_MAX_NUM_PARALLEL_LINK_JOBS "1" CACHE STRING
"Define the maximum number of concurrent link jobs, for ninja build system." FORCE)
mark_as_advanced(NINJA_MAX_NUM_PARALLEL_LINK_JOBS)
@@ -1417,83 +1358,98 @@ if("${CMAKE_GENERATOR}" MATCHES "Ninja" AND WITH_NINJA_POOL_JOBS)
endif()
endif()
# -----------------------------------------------------------------------------
# Extra Compile Flags
#-----------------------------------------------------------------------------
# Extra compile flags
if(CMAKE_COMPILER_IS_GNUCC)
add_check_c_compiler_flag(C_WARNINGS C_WARN_ALL -Wall)
add_check_c_compiler_flag(C_WARNINGS C_WARN_ERROR_IMPLICIT_FUNCTION_DECLARATION -Werror=implicit-function-declaration)
add_check_c_compiler_flag(C_WARNINGS C_WARN_ERROR_RETURN_TYPE -Werror=return-type)
add_check_c_compiler_flag(C_WARNINGS C_WARN_ERROR_VLA -Werror=vla)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ALL -Wall)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_IMPLICIT_FUNCTION_DECLARATION -Werror=implicit-function-declaration)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_RETURN_TYPE -Werror=return-type)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_VLA -Werror=vla)
# system headers sometimes do this, disable for now, was: -Werror=strict-prototypes
add_check_c_compiler_flag(C_WARNINGS C_WARN_STRICT_PROTOTYPES -Wstrict-prototypes)
add_check_c_compiler_flag(C_WARNINGS C_WARN_MISSING_PROTOTYPES -Wmissing-prototypes)
add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
add_check_c_compiler_flag(C_WARNINGS C_WARN_POINTER_ARITH -Wpointer-arith)
add_check_c_compiler_flag(C_WARNINGS C_WARN_UNUSED_PARAMETER -Wunused-parameter)
add_check_c_compiler_flag(C_WARNINGS C_WARN_WRITE_STRINGS -Wwrite-strings)
add_check_c_compiler_flag(C_WARNINGS C_WARN_LOGICAL_OP -Wlogical-op)
add_check_c_compiler_flag(C_WARNINGS C_WARN_UNDEF -Wundef)
add_check_c_compiler_flag(C_WARNINGS C_WARN_INIT_SELF -Winit-self) # needs -Wuninitialized
add_check_c_compiler_flag(C_WARNINGS C_WARN_MISSING_INCLUDE_DIRS -Wmissing-include-dirs)
add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_DIV_BY_ZERO -Wno-div-by-zero)
add_check_c_compiler_flag(C_WARNINGS C_WARN_TYPE_LIMITS -Wtype-limits)
add_check_c_compiler_flag(C_WARNINGS C_WARN_FORMAT_SIGN -Wformat-signedness)
add_check_c_compiler_flag(C_WARNINGS C_WARN_RESTRICT -Wrestrict)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_STRICT_PROTOTYPES -Wstrict-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_MISSING_PROTOTYPES -Wmissing-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_POINTER_ARITH -Wpointer-arith)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNUSED_PARAMETER -Wunused-parameter)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_WRITE_STRINGS -Wwrite-strings)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_LOGICAL_OP -Wlogical-op)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNDEF -Wundef)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_INIT_SELF -Winit-self) # needs -Wuninitialized
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_MISSING_INCLUDE_DIRS -Wmissing-include-dirs)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_DIV_BY_ZERO -Wno-div-by-zero)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_TYPE_LIMITS -Wtype-limits)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_FORMAT_SIGN -Wformat-signedness)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_RESTRICT -Wrestrict)
# C-only.
add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_NULL -Wnonnull)
add_check_c_compiler_flag(C_WARNINGS C_WARN_ABSOLUTE_VALUE -Wabsolute-value)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_NULL -Wnonnull)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ABSOLUTE_VALUE -Wabsolute-value)
add_check_c_compiler_flag(C_WARNINGS C_WARN_UNINITIALIZED -Wuninitialized)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_UNINITIALIZED -Wuninitialized)
add_check_c_compiler_flag(C_WARNINGS C_WARN_REDUNDANT_DECLS -Wredundant-decls)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_REDUNDANT_DECLS -Wredundant-decls)
add_check_c_compiler_flag(C_WARNINGS C_WARN_SHADOW -Wshadow)
# disable because it gives warnings for printf() & friends.
# add_check_c_compiler_flag(C_WARNINGS C_WARN_DOUBLE_PROMOTION -Wdouble-promotion -Wno-error=double-promotion)
if(NOT APPLE)
add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_ERROR_UNUSED_BUT_SET_VARIABLE -Wno-error=unused-but-set-variable)
# gcc 4.2 gives annoying warnings on every file with this
if(NOT "${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "4.3")
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNINITIALIZED -Wuninitialized)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNINITIALIZED -Wuninitialized)
endif()
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_ALL -Wall)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_INVALID_OFFSETOF -Wno-invalid-offsetof)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_SIGN_COMPARE -Wno-sign-compare)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_LOGICAL_OP -Wlogical-op)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_INIT_SELF -Winit-self) # needs -Wuninitialized
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_MISSING_INCLUDE_DIRS -Wmissing-include-dirs)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_DIV_BY_ZERO -Wno-div-by-zero)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_TYPE_LIMITS -Wtype-limits)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_ERROR_RETURN_TYPE -Werror=return-type)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_POINTER_ARITH -Wpointer-arith)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_UNUSED_PARAMETER -Wunused-parameter)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_WRITE_STRINGS -Wwrite-strings)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_UNDEF -Wundef)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_FORMAT_SIGN -Wformat-signedness)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_RESTRICT -Wrestrict)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_SUGGEST_OVERRIDE -Wno-suggest-override)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_UNINITIALIZED -Wuninitialized)
# versions before gcc4.6 give many BLI_math warnings
if(NOT "${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "4.6")
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_REDUNDANT_DECLS -Wredundant-decls)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_REDUNDANT_DECLS -Wredundant-decls)
endif()
# versions before gcc4.8 include global name-space.
if(NOT "${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "4.8")
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_SHADOW -Wshadow)
endif()
# disable because it gives warnings for printf() & friends.
# ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_DOUBLE_PROMOTION -Wdouble-promotion -Wno-error=double-promotion)
if(NOT APPLE)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_ERROR_UNUSED_BUT_SET_VARIABLE -Wno-error=unused-but-set-variable)
endif()
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_ALL -Wall)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_INVALID_OFFSETOF -Wno-invalid-offsetof)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_SIGN_COMPARE -Wno-sign-compare)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_LOGICAL_OP -Wlogical-op)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_INIT_SELF -Winit-self) # needs -Wuninitialized
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_MISSING_INCLUDE_DIRS -Wmissing-include-dirs)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_DIV_BY_ZERO -Wno-div-by-zero)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_TYPE_LIMITS -Wtype-limits)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_ERROR_RETURN_TYPE -Werror=return-type)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_POINTER_ARITH -Wpointer-arith)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNUSED_PARAMETER -Wunused-parameter)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_WRITE_STRINGS -Wwrite-strings)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNDEF -Wundef)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_FORMAT_SIGN -Wformat-signedness)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_RESTRICT -Wrestrict)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_SUGGEST_OVERRIDE -Wno-suggest-override)
# gcc 4.2 gives annoying warnings on every file with this
if(NOT "${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "4.3")
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNINITIALIZED -Wuninitialized)
endif()
# causes too many warnings
if(NOT APPLE)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_UNDEF -Wundef)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_MISSING_DECLARATIONS -Wmissing-declarations)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNDEF -Wundef)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_MISSING_DECLARATIONS -Wmissing-declarations)
endif()
# Use 'ATTR_FALLTHROUGH' macro to suppress.
add_check_c_compiler_flag(C_WARNINGS C_WARN_IMPLICIT_FALLTHROUGH -Wimplicit-fallthrough=5)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_IMPLICIT_FALLTHROUGH -Wimplicit-fallthrough=5)
if(CMAKE_COMPILER_IS_GNUCC AND (NOT "${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "7.0"))
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_IMPLICIT_FALLTHROUGH -Wimplicit-fallthrough=5)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_IMPLICIT_FALLTHROUGH -Wimplicit-fallthrough=5)
endif()
# ---------------------
#----------------------
# Suppress Strict Flags
#
# Exclude the following warnings from this list:
@@ -1505,100 +1461,102 @@ if(CMAKE_COMPILER_IS_GNUCC)
# If code in `./extern/` needs to suppress these flags that can be done on a case-by-case basis.
# flags to undo strict flags
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_DEPRECATED_DECLARATIONS -Wno-deprecated-declarations)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_PARAMETER -Wno-unused-parameter)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_FUNCTION -Wno-unused-function)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_TYPE_LIMITS -Wno-type-limits)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_INT_IN_BOOL_CONTEXT -Wno-int-in-bool-context)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_FORMAT -Wno-format)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_SWITCH -Wno-switch)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_VARIABLE -Wno-uninitialized)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_DEPRECATED_DECLARATIONS -Wno-deprecated-declarations)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_PARAMETER -Wno-unused-parameter)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_FUNCTION -Wno-unused-function)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_TYPE_LIMITS -Wno-type-limits)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_INT_IN_BOOL_CONTEXT -Wno-int-in-bool-context)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_FORMAT -Wno-format)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_SWITCH -Wno-switch)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_VARIABLE -Wno-uninitialized)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_CLASS_MEMACCESS -Wno-class-memaccess)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_COMMENT -Wno-comment)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_TYPEDEFS -Wno-unused-local-typedefs)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_VARIABLE -Wno-uninitialized)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_CLASS_MEMACCESS -Wno-class-memaccess)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_COMMENT -Wno-comment)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_TYPEDEFS -Wno-unused-local-typedefs)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_VARIABLE -Wno-uninitialized)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_IMPLICIT_FALLTHROUGH -Wno-implicit-fallthrough)
if(CMAKE_COMPILER_IS_GNUCC AND (NOT "${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "7.0"))
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_IMPLICIT_FALLTHROUGH -Wno-implicit-fallthrough)
endif()
if(NOT APPLE)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_ERROR_UNUSED_BUT_SET_VARIABLE -Wno-error=unused-but-set-variable)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_ERROR_UNUSED_BUT_SET_VARIABLE -Wno-error=unused-but-set-variable)
endif()
elseif(CMAKE_C_COMPILER_ID MATCHES "Clang")
# strange, clang complains these are not supported, but then uses them.
add_check_c_compiler_flag(C_WARNINGS C_WARN_ALL -Wall)
add_check_c_compiler_flag(C_WARNINGS C_WARN_ERROR_IMPLICIT_FUNCTION_DECLARATION -Werror=implicit-function-declaration)
add_check_c_compiler_flag(C_WARNINGS C_WARN_ERROR_RETURN_TYPE -Werror=return-type)
add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_AUTOLOGICAL_COMPARE -Wno-tautological-compare)
add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
add_check_c_compiler_flag(C_WARNINGS C_WARN_STRICT_PROTOTYPES -Wstrict-prototypes)
add_check_c_compiler_flag(C_WARNINGS C_WARN_MISSING_PROTOTYPES -Wmissing-prototypes)
add_check_c_compiler_flag(C_WARNINGS C_WARN_UNUSED_PARAMETER -Wunused-parameter)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ALL -Wall)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_IMPLICIT_FUNCTION_DECLARATION -Werror=implicit-function-declaration)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_RETURN_TYPE -Werror=return-type)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_AUTOLOGICAL_COMPARE -Wno-tautological-compare)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_STRICT_PROTOTYPES -Wstrict-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_MISSING_PROTOTYPES -Wmissing-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNUSED_PARAMETER -Wunused-parameter)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_ALL -Wall)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_ALL -Wall)
# Using C++20 features while having C++17 as the project language isn't allowed by MSVC.
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_CXX20_DESIGNATOR -Wc++20-designator)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_CXX20_DESIGNATOR -Wc++20-designator)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_AUTOLOGICAL_COMPARE -Wno-tautological-compare)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_OVERLOADED_VIRTUAL -Wno-overloaded-virtual) # we get a lot of these, if its a problem a dev needs to look into it.
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_SIGN_COMPARE -Wno-sign-compare)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_INVALID_OFFSETOF -Wno-invalid-offsetof)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_AUTOLOGICAL_COMPARE -Wno-tautological-compare)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_OVERLOADED_VIRTUAL -Wno-overloaded-virtual) # we get a lot of these, if its a problem a dev needs to look into it.
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_SIGN_COMPARE -Wno-sign-compare)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_INVALID_OFFSETOF -Wno-invalid-offsetof)
# Apple Clang (tested on version 12) doesn't support this flag while LLVM Clang 11 does.
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_SUGGEST_OVERRIDE -Wno-suggest-override)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_SUGGEST_OVERRIDE -Wno-suggest-override)
# gives too many unfixable warnings
# add_check_c_compiler_flag(C_WARNINGS C_WARN_UNUSED_MACROS -Wunused-macros)
# add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_UNUSED_MACROS -Wunused-macros)
# ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNUSED_MACROS -Wunused-macros)
# ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNUSED_MACROS -Wunused-macros)
# ---------------------
#----------------------
# Suppress Strict Flags
# flags to undo strict flags
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_PARAMETER -Wno-unused-parameter)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_MACROS -Wno-unused-macros)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISLEADING_INDENTATION -Wno-misleading-indentation)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_PARAMETER -Wno-unused-parameter)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_MACROS -Wno-unused-macros)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISLEADING_INDENTATION -Wno-misleading-indentation)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISSING_VARIABLE_DECLARATIONS -Wno-missing-variable-declarations)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_INCOMPAT_PTR_DISCARD_QUAL -Wno-incompatible-pointer-types-discards-qualifiers)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_FUNCTION -Wno-unused-function)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_INT_TO_VOID_POINTER_CAST -Wno-int-to-void-pointer-cast)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISSING_PROTOTYPES -Wno-missing-prototypes)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_DUPLICATE_ENUM -Wno-duplicate-enum)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNDEF -Wno-undef)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISSING_NORETURN -Wno-missing-noreturn)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_BUT_SET_VARIABLE -Wno-unused-but-set-variable)
add_check_c_compiler_flag(C_REMOVE_STRICT_FLAGS C_WARN_NO_DEPRECATED_DECLARATIONS -Wno-deprecated-declarations)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISSING_VARIABLE_DECLARATIONS -Wno-missing-variable-declarations)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_INCOMPAT_PTR_DISCARD_QUAL -Wno-incompatible-pointer-types-discards-qualifiers)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_FUNCTION -Wno-unused-function)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_INT_TO_VOID_POINTER_CAST -Wno-int-to-void-pointer-cast)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISSING_PROTOTYPES -Wno-missing-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_DUPLICATE_ENUM -Wno-duplicate-enum)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNDEF -Wno-undef)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISSING_NORETURN -Wno-missing-noreturn)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_BUT_SET_VARIABLE -Wno-unused-but-set-variable)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_DEPRECATED_DECLARATIONS -Wno-deprecated-declarations)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_PARAMETER -Wno-unused-parameter)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_PRIVATE_FIELD -Wno-unused-private-field)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_CXX11_NARROWING -Wno-c++11-narrowing)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_NON_VIRTUAL_DTOR -Wno-non-virtual-dtor)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_MACROS -Wno-unused-macros)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_REORDER -Wno-reorder)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_COMMENT -Wno-comment)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_TYPEDEFS -Wno-unused-local-typedefs)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNDEFINED_VAR_TEMPLATE -Wno-undefined-var-template)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_INSTANTIATION_AFTER_SPECIALIZATION -Wno-instantiation-after-specialization)
add_check_cxx_compiler_flag(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_MISLEADING_INDENTATION -Wno-misleading-indentation)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_PARAMETER -Wno-unused-parameter)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_PRIVATE_FIELD -Wno-unused-private-field)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_CXX11_NARROWING -Wno-c++11-narrowing)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_NON_VIRTUAL_DTOR -Wno-non-virtual-dtor)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_MACROS -Wno-unused-macros)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_REORDER -Wno-reorder)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_COMMENT -Wno-comment)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_TYPEDEFS -Wno-unused-local-typedefs)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNDEFINED_VAR_TEMPLATE -Wno-undefined-var-template)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_INSTANTIATION_AFTER_SPECIALIZATION -Wno-instantiation-after-specialization)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_MISLEADING_INDENTATION -Wno-misleading-indentation)
elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")
add_check_c_compiler_flag(C_WARNINGS C_WARN_ALL -Wall)
add_check_c_compiler_flag(C_WARNINGS C_WARN_POINTER_ARITH -Wpointer-arith)
add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ALL -Wall)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_POINTER_ARITH -Wpointer-arith)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_ALL -Wall)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_INVALID_OFFSETOF -Wno-invalid-offsetof)
add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_SIGN_COMPARE -Wno-sign-compare)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_ALL -Wall)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_INVALID_OFFSETOF -Wno-invalid-offsetof)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_SIGN_COMPARE -Wno-sign-compare)
# disable numbered, false positives
string(APPEND C_WARNINGS " -wd188,186,144,913,556,858,597,177,1292,167,279,592,94,2722,3199")
@@ -1611,8 +1569,6 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "MSVC")
"/w34062" # switch statement contains 'default' but no 'case' labels
"/w34115" # 'type' : named type definition in parentheses
"/w34189" # local variable is initialized but not referenced
# see https://docs.microsoft.com/en-us/cpp/error-messages/compiler-warnings/c5038?view=vs-2017
"/w35038" # order of initialization in c++ constructors
# disable:
"/wd4018" # signed/unsigned mismatch
"/wd4146" # unary minus operator applied to unsigned type, result still unsigned
@@ -1632,9 +1588,13 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "MSVC")
"/we4013" # 'function' undefined; assuming extern returning int
"/we4133" # incompatible pointer types
"/we4431" # missing type specifier - int assumed
"/we4033" # 'function' must return a value
)
if(MSVC_VERSION GREATER_EQUAL 1911)
# see https://docs.microsoft.com/en-us/cpp/error-messages/compiler-warnings/c5038?view=vs-2017
string(APPEND _WARNINGS " /w35038") # order of initialization in c++ constructors
endif()
string(REPLACE ";" " " _WARNINGS "${_WARNINGS}")
set(C_WARNINGS "${_WARNINGS}")
set(CXX_WARNINGS "${_WARNINGS}")
@@ -1646,8 +1606,7 @@ endif()
# be most problematic.
if(WITH_PYTHON)
if(NOT EXISTS "${PYTHON_INCLUDE_DIR}/Python.h")
message(
FATAL_ERROR
message(FATAL_ERROR
"Missing: \"${PYTHON_INCLUDE_DIR}/Python.h\",\n"
"Set the cache entry 'PYTHON_INCLUDE_DIR' to point "
"to a valid python include path. Containing "
@@ -1655,8 +1614,8 @@ if(WITH_PYTHON)
)
endif()
if(WIN32)
# Always use numpy bundled in precompiled libs.
if(WIN32 OR APPLE)
# Windows and macOS have this bundled with Python libraries.
elseif((WITH_PYTHON_INSTALL AND WITH_PYTHON_INSTALL_NUMPY) OR WITH_PYTHON_NUMPY)
if(("${PYTHON_NUMPY_PATH}" STREQUAL "") OR (${PYTHON_NUMPY_PATH} MATCHES NOTFOUND))
find_python_package(numpy "core/include")
@@ -1664,13 +1623,13 @@ if(WITH_PYTHON)
endif()
if(WIN32 OR APPLE)
# Always copy from precompiled libs.
# pass, we have this in lib/python/site-packages
elseif(WITH_PYTHON_INSTALL_REQUESTS)
find_python_package(requests "")
endif()
if(WIN32 OR APPLE)
# Always copy from precompiled libs.
# pass, we have this in lib/python/site-packages
elseif(WITH_PYTHON_INSTALL_ZSTANDARD)
find_python_package(zstandard "")
endif()
@@ -1686,7 +1645,7 @@ set(CMAKE_CXX_EXTENSIONS OFF)
# Make MSVC properly report the value of the __cplusplus preprocessor macro
# Available MSVC 15.7 (1914) and up, without this it reports 199711L regardless
# of the C++ standard chosen above.
if(MSVC)
if(MSVC AND MSVC_VERSION GREATER 1913)
string(APPEND CMAKE_CXX_FLAGS " /Zc:__cplusplus")
endif()
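For context, a minimal stand-alone check (a sketch, not part of this diff; the /std:c++17 value and the result variable name are assumptions) that the MSVC switch really makes __cplusplus report the selected standard instead of 199711L:
include(CheckCXXSourceCompiles)
if(MSVC AND MSVC_VERSION GREATER 1913)
  set(CMAKE_REQUIRED_FLAGS "/std:c++17 /Zc:__cplusplus")
  check_cxx_source_compiles(
    "static_assert(__cplusplus >= 201703L, \"__cplusplus still reports 199711L\"); int main() { return 0; }"
    MSVC_REPORTS_CPLUSPLUS)  # TRUE only when the macro reflects the real standard
  unset(CMAKE_REQUIRED_FLAGS)
endif()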
@@ -1709,18 +1668,16 @@ endif()
if(WITH_COMPILER_SHORT_FILE_MACRO)
# Use '-fmacro-prefix-map' for Clang and GCC (MSVC doesn't support this).
add_check_c_compiler_flag(C_PREFIX_MAP_FLAGS C_MACRO_PREFIX_MAP -fmacro-prefix-map=foo=bar)
add_check_cxx_compiler_flag(CXX_PREFIX_MAP_FLAGS CXX_MACRO_PREFIX_MAP -fmacro-prefix-map=foo=bar)
ADD_CHECK_C_COMPILER_FLAG(C_PREFIX_MAP_FLAGS C_MACRO_PREFIX_MAP -fmacro-prefix-map=foo=bar)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_PREFIX_MAP_FLAGS CXX_MACRO_PREFIX_MAP -fmacro-prefix-map=foo=bar)
if(C_MACRO_PREFIX_MAP AND CXX_MACRO_PREFIX_MAP)
if(APPLE)
if(XCODE AND ${XCODE_VERSION} VERSION_LESS 12.0)
# Developers may have say LLVM Clang-10.0.1 toolchain (which supports the flag)
# with Xcode-11 (the Clang of which doesn't support the flag).
message(
WARNING
message(WARNING
"-fmacro-prefix-map flag is NOT supported by Clang shipped with Xcode-${XCODE_VERSION}."
" Some Xcode functionality in Product menu may not work. "
"Disabling WITH_COMPILER_SHORT_FILE_MACRO."
" Some Xcode functionality in Product menu may not work. Disabling WITH_COMPILER_SHORT_FILE_MACRO."
)
set(WITH_COMPILER_SHORT_FILE_MACRO OFF)
endif()
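A minimal sketch of how the flag is typically applied once both checks above succeed (not taken from the diff; mapping ${CMAKE_SOURCE_DIR} to an empty prefix is an assumption), so __FILE__ expands to a short repository-relative path:
if(C_MACRO_PREFIX_MAP AND CXX_MACRO_PREFIX_MAP)
  # Strip the absolute source directory prefix from __FILE__ in both C and C++.
  string(APPEND CMAKE_C_FLAGS   " -fmacro-prefix-map=${CMAKE_SOURCE_DIR}/=")
  string(APPEND CMAKE_CXX_FLAGS " -fmacro-prefix-map=${CMAKE_SOURCE_DIR}/=")
endif()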
@@ -1736,8 +1693,7 @@ if(WITH_COMPILER_SHORT_FILE_MACRO)
unset(_bin_dir)
endif()
else()
message(
WARNING
message(WARNING
"-fmacro-prefix-map flag is NOT supported by C/C++ compiler."
" Disabling WITH_COMPILER_SHORT_FILE_MACRO."
)
@@ -1767,8 +1723,7 @@ mark_as_advanced(
LLVM_VERSION
)
# -------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Global Defines
# better not set includes here but this debugging option is off by default.
@@ -1784,9 +1739,8 @@ endif()
# message(STATUS "Using CFLAGS: ${CMAKE_C_FLAGS}")
# message(STATUS "Using CXXFLAGS: ${CMAKE_CXX_FLAGS}")
# -----------------------------------------------------------------------------
# Add Sub-Directories
#-----------------------------------------------------------------------------
# Libraries
if(WITH_BLENDER)
add_subdirectory(intern)
@@ -1815,41 +1769,33 @@ elseif(WITH_CYCLES_STANDALONE OR WITH_CYCLES_HYDRA_RENDER_DELEGATE)
endif()
endif()
# -----------------------------------------------------------------------------
# Add Testing Directory
#-----------------------------------------------------------------------------
# Testing
add_subdirectory(tests)
# -----------------------------------------------------------------------------
# Add Blender Application
#-----------------------------------------------------------------------------
# Blender Application
if(WITH_BLENDER)
add_subdirectory(source/creator)
endif()
# -----------------------------------------------------------------------------
# Define 'heavy' sub-modules (for Ninja builder when using pools)
#-----------------------------------------------------------------------------
# Define 'heavy' submodules (for Ninja builder when using pools).
setup_heavy_lib_pool()
# -----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# CPack for generating packages
include(build_files/cmake/packaging.cmake)
# -----------------------------------------------------------------------------
# Use Dynamic Loading for OpenMP
#-----------------------------------------------------------------------------
# Use dynamic loading for OpenMP
if(WITH_BLENDER)
openmp_delayload(blender)
endif()
# -----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Print Final Configuration
if(FIRST_RUN)
@@ -1945,6 +1891,9 @@ if(FIRST_RUN)
info_cfg_option(WITH_LZO)
info_cfg_text("Python:")
if(APPLE)
info_cfg_option(WITH_PYTHON_FRAMEWORK)
endif()
info_cfg_option(WITH_PYTHON_INSTALL)
info_cfg_option(WITH_PYTHON_INSTALL_NUMPY)
info_cfg_option(WITH_PYTHON_INSTALL_ZSTANDARD)

View File

@@ -162,7 +162,6 @@ CPU:=$(shell uname -m)
# Source and Build DIR's
BLENDER_DIR:=$(shell pwd -P)
BUILD_TYPE:=Release
BLENDER_IS_PYTHON_MODULE:=
# CMake arguments, assigned to local variable to make it mutable.
CMAKE_CONFIG_ARGS := $(BUILD_CMAKE_ARGS)
@@ -230,18 +229,9 @@ endif
# -----------------------------------------------------------------------------
# Additional targets for the build configuration
# additional targets for the build configuration
# NOTE: These targets can be combined and are applied in reverse order listed here.
# So it's important that `bpy` comes before `release` (for example)
# `make bpy release` first loads `release` configuration, then `bpy`.
# This is important as `bpy` will turn off some settings enabled by release.
ifneq "$(findstring bpy, $(MAKECMDGOALS))" ""
BUILD_DIR:=$(BUILD_DIR)_bpy
CMAKE_CONFIG_ARGS:=-C"$(BLENDER_DIR)/build_files/cmake/config/bpy_module.cmake" $(CMAKE_CONFIG_ARGS)
BLENDER_IS_PYTHON_MODULE:=1
endif
# support 'make debug'
ifneq "$(findstring debug, $(MAKECMDGOALS))" ""
BUILD_DIR:=$(BUILD_DIR)_debug
BUILD_TYPE:=Debug
@@ -266,6 +256,10 @@ ifneq "$(findstring headless, $(MAKECMDGOALS))" ""
BUILD_DIR:=$(BUILD_DIR)_headless
CMAKE_CONFIG_ARGS:=-C"$(BLENDER_DIR)/build_files/cmake/config/blender_headless.cmake" $(CMAKE_CONFIG_ARGS)
endif
ifneq "$(findstring bpy, $(MAKECMDGOALS))" ""
BUILD_DIR:=$(BUILD_DIR)_bpy
CMAKE_CONFIG_ARGS:=-C"$(BLENDER_DIR)/build_files/cmake/config/bpy_module.cmake" $(CMAKE_CONFIG_ARGS)
endif
ifneq "$(findstring developer, $(MAKECMDGOALS))" ""
CMAKE_CONFIG_ARGS:=-C"$(BLENDER_DIR)/build_files/cmake/config/blender_developer.cmake" $(CMAKE_CONFIG_ARGS)
@@ -303,10 +297,8 @@ endif
# use the default build path can still use utility helpers.
ifeq ($(OS), Darwin)
BLENDER_BIN?="$(BUILD_DIR)/bin/Blender.app/Contents/MacOS/Blender"
BLENDER_BIN_DIR?="$(BUILD_DIR)/bin/Blender.app/Contents/MacOS/Blender"
else
BLENDER_BIN?="$(BUILD_DIR)/bin/blender"
BLENDER_BIN_DIR?="$(BUILD_DIR)/bin"
endif
@@ -363,12 +355,8 @@ all: .FORCE
@echo Building Blender ...
$(BUILD_COMMAND) -C "$(BUILD_DIR)" -j $(NPROCS) install
@echo
@echo Edit build configuration with: \"$(BUILD_DIR)/CMakeCache.txt\" run make again to rebuild.
@if test -z "$(BLENDER_IS_PYTHON_MODULE)"; then \
echo Blender successfully built, run from: $(BLENDER_BIN); \
else \
echo Blender successfully built as a Python module, \"bpy\" can be imported from: $(BLENDER_BIN_DIR); \
fi
@echo edit build configuration with: "$(BUILD_DIR)/CMakeCache.txt" run make again to rebuild.
@echo Blender successfully built, run from: $(BLENDER_BIN)
@echo
debug: all

View File

@@ -94,7 +94,6 @@ include(cmake/pugixml.cmake)
include(cmake/ispc.cmake)
include(cmake/openimagedenoise.cmake)
include(cmake/embree.cmake)
include(cmake/openpgl.cmake)
include(cmake/fmt.cmake)
include(cmake/robinmap.cmake)
if(NOT APPLE)
@@ -170,8 +169,6 @@ if(UNIX AND NOT APPLE)
include(cmake/libglu.cmake)
include(cmake/mesa.cmake)
include(cmake/wayland_protocols.cmake)
# Can be removed when the build-bot upgrades to v1.20.x or newer.
include(cmake/wayland.cmake)
endif()
include(cmake/harvest.cmake)

View File

@@ -89,7 +89,6 @@ download_source(MESA)
download_source(NASM)
download_source(XR_OPENXR_SDK)
download_source(WL_PROTOCOLS)
download_source(WAYLAND)
download_source(ISPC)
download_source(GMP)
download_source(POTRACE)
@@ -102,7 +101,6 @@ download_source(FMT)
download_source(ROBINMAP)
download_source(IMATH)
download_source(PYSTRING)
download_source(OPENPGL)
download_source(LEVEL_ZERO)
download_source(DPCPP)
download_source(VCINTRINSICS)

View File

@@ -68,7 +68,7 @@ set(DPCPP_EXTRA_ARGS
)
if(WIN32)
list(APPEND DPCPP_EXTRA_ARGS -DPython3_FIND_REGISTRY=NEVER)
list(APPEND DPCPP_EXTRA_ARGS -DPython3_FIND_REGISTRY=NEVER)
endif()
ExternalProject_Add(external_dpcpp

View File

@@ -11,192 +11,188 @@ message("HARVEST_TARGET = ${HARVEST_TARGET}")
if(WIN32)
if(BUILD_MODE STREQUAL Release)
add_custom_target(Harvest_Release_Results
COMMAND # jpeg rename libfile + copy include
${CMAKE_COMMAND} -E copy ${LIBDIR}/jpeg/lib/jpeg-static.lib ${HARVEST_TARGET}/jpeg/lib/libjpeg.lib &&
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/jpeg/include/ ${HARVEST_TARGET}/jpeg/include/ &&
# png
${CMAKE_COMMAND} -E copy ${LIBDIR}/png/lib/libpng16_static.lib ${HARVEST_TARGET}/png/lib/libpng.lib &&
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/png/include/ ${HARVEST_TARGET}/png/include/ &&
# freeglut-> opengl
${CMAKE_COMMAND} -E copy ${LIBDIR}/freeglut/lib/freeglut_static.lib ${HARVEST_TARGET}/opengl/lib/freeglut_static.lib &&
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/freeglut/include/ ${HARVEST_TARGET}/opengl/include/ &&
DEPENDS
if(BUILD_MODE STREQUAL Release)
add_custom_target(Harvest_Release_Results
COMMAND # jpeg rename libfile + copy include
${CMAKE_COMMAND} -E copy ${LIBDIR}/jpeg/lib/jpeg-static.lib ${HARVEST_TARGET}/jpeg/lib/libjpeg.lib &&
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/jpeg/include/ ${HARVEST_TARGET}/jpeg/include/ &&
# png
${CMAKE_COMMAND} -E copy ${LIBDIR}/png/lib/libpng16_static.lib ${HARVEST_TARGET}/png/lib/libpng.lib &&
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/png/include/ ${HARVEST_TARGET}/png/include/ &&
# freeglut-> opengl
${CMAKE_COMMAND} -E copy ${LIBDIR}/freeglut/lib/freeglut_static.lib ${HARVEST_TARGET}/opengl/lib/freeglut_static.lib &&
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/freeglut/include/ ${HARVEST_TARGET}/opengl/include/ &&
DEPENDS
)
endif()
else(WIN32)
function(harvest from to)
set(pattern "")
foreach(f ${ARGN})
set(pattern ${f})
endforeach()
if(pattern STREQUAL "")
get_filename_component(dirpath ${to} DIRECTORY)
get_filename_component(filename ${to} NAME)
install(
FILES ${LIBDIR}/${from}
DESTINATION ${HARVEST_TARGET}/${dirpath}
RENAME ${filename}
)
else()
install(
DIRECTORY ${LIBDIR}/${from}/
DESTINATION ${HARVEST_TARGET}/${to}
USE_SOURCE_PERMISSIONS
FILES_MATCHING PATTERN ${pattern}
PATTERN "pkgconfig" EXCLUDE
PATTERN "cmake" EXCLUDE
PATTERN "__pycache__" EXCLUDE
PATTERN "tests" EXCLUDE
)
endif()
endfunction()
harvest(alembic/include alembic/include "*.h")
harvest(alembic/lib/libAlembic.a alembic/lib/libAlembic.a)
harvest(alembic/bin alembic/bin "*")
harvest(brotli/include brotli/include "*.h")
harvest(brotli/lib brotli/lib "*.a")
harvest(boost/include boost/include "*")
harvest(boost/lib boost/lib "*.a")
harvest(imath/include imath/include "*.h")
harvest(imath/lib imath/lib "*.a")
harvest(ffmpeg/include ffmpeg/include "*.h")
harvest(ffmpeg/lib ffmpeg/lib "*.a")
harvest(fftw3/include fftw3/include "*.h")
harvest(fftw3/lib fftw3/lib "*.a")
harvest(flac/lib sndfile/lib "libFLAC.a")
harvest(freetype/include freetype/include "*.h")
harvest(freetype/lib/libfreetype2ST.a freetype/lib/libfreetype.a)
harvest(epoxy/include epoxy/include "*.h")
harvest(epoxy/lib epoxy/lib "*.a")
harvest(gmp/include gmp/include "*.h")
harvest(gmp/lib gmp/lib "*.a")
harvest(jemalloc/include jemalloc/include "*.h")
harvest(jemalloc/lib jemalloc/lib "*.a")
harvest(jpeg/include jpeg/include "*.h")
harvest(jpeg/lib jpeg/lib "libjpeg.a")
harvest(lame/lib ffmpeg/lib "*.a")
if(NOT APPLE)
harvest(level-zero/include/level_zero level-zero/include/level_zero "*.h")
harvest(level-zero/lib level-zero/lib "*.so*")
endif()
harvest(llvm/bin llvm/bin "clang-format")
if(BUILD_CLANG_TOOLS)
harvest(llvm/bin llvm/bin "clang-tidy")
harvest(llvm/share/clang llvm/share "run-clang-tidy.py")
endif()
harvest(llvm/include llvm/include "*")
harvest(llvm/bin llvm/bin "llvm-config")
harvest(llvm/lib llvm/lib "libLLVM*.a")
harvest(llvm/lib llvm/lib "libclang*.a")
harvest(llvm/lib/clang llvm/lib/clang "*.h")
if(APPLE)
harvest(openmp/lib openmp/lib "*")
harvest(openmp/include openmp/include "*.h")
endif()
if(BLENDER_PLATFORM_ARM)
harvest(sse2neon sse2neon "*.h")
endif()
harvest(ogg/lib ffmpeg/lib "*.a")
harvest(openal/include openal/include "*.h")
if(UNIX AND NOT APPLE)
harvest(openal/lib openal/lib "*.a")
harvest(blosc/include blosc/include "*.h")
harvest(blosc/lib blosc/lib "*.a")
harvest(zlib/include zlib/include "*.h")
harvest(zlib/lib zlib/lib "*.a")
harvest(xml2/include xml2/include "*.h")
harvest(xml2/lib xml2/lib "*.a")
harvest(wayland-protocols/share/wayland-protocols wayland-protocols/share/wayland-protocols/ "*.xml")
else()
harvest(blosc/lib openvdb/lib "*.a")
harvest(xml2/lib opencollada/lib "*.a")
endif()
harvest(opencollada/include/opencollada opencollada/include "*.h")
harvest(opencollada/lib/opencollada opencollada/lib "*.a")
harvest(opencolorio/include opencolorio/include "*.h")
harvest(opencolorio/lib opencolorio/lib "*.a")
harvest(opencolorio/lib/static opencolorio/lib "*.a")
harvest(openexr/include openexr/include "*.h")
harvest(openexr/lib openexr/lib "*.a")
harvest(openimageio/bin openimageio/bin "idiff")
harvest(openimageio/bin openimageio/bin "maketx")
harvest(openimageio/bin openimageio/bin "oiiotool")
harvest(openimageio/include openimageio/include "*")
harvest(openimageio/lib openimageio/lib "*.a")
harvest(openimagedenoise/include openimagedenoise/include "*")
harvest(openimagedenoise/lib openimagedenoise/lib "*.a")
harvest(embree/include embree/include "*.h")
harvest(embree/lib embree/lib "*.a")
harvest(openjpeg/include/openjpeg-${OPENJPEG_SHORT_VERSION} openjpeg/include "*.h")
harvest(openjpeg/lib openjpeg/lib "*.a")
harvest(opensubdiv/include opensubdiv/include "*.h")
harvest(opensubdiv/lib opensubdiv/lib "*.a")
harvest(openvdb/include/openvdb openvdb/include/openvdb "*.h")
harvest(openvdb/include/nanovdb openvdb/include/nanovdb "*.h")
harvest(openvdb/lib openvdb/lib "*.a")
harvest(xr_openxr_sdk/include/openxr xr_openxr_sdk/include/openxr "*.h")
harvest(xr_openxr_sdk/lib xr_openxr_sdk/lib "*.a")
harvest(osl/bin osl/bin "oslc")
harvest(osl/include osl/include "*.h")
harvest(osl/lib osl/lib "*.a")
harvest(osl/share/OSL/shaders osl/share/OSL/shaders "*.h")
harvest(png/include png/include "*.h")
harvest(png/lib png/lib "*.a")
harvest(pugixml/include pugixml/include "*.hpp")
harvest(pugixml/lib pugixml/lib "*.a")
harvest(python/bin python/bin "python${PYTHON_SHORT_VERSION}")
harvest(python/include python/include "*h")
harvest(python/lib python/lib "*")
harvest(sdl/include/SDL2 sdl/include "*.h")
harvest(sdl/lib sdl/lib "libSDL2.a")
harvest(sndfile/include sndfile/include "*.h")
harvest(sndfile/lib sndfile/lib "*.a")
harvest(spnav/include spnav/include "*.h")
harvest(spnav/lib spnav/lib "*.a")
harvest(tbb/include tbb/include "*.h")
harvest(tbb/lib/libtbb_static.a tbb/lib/libtbb.a)
harvest(theora/lib ffmpeg/lib "*.a")
harvest(tiff/include tiff/include "*.h")
harvest(tiff/lib tiff/lib "*.a")
harvest(vorbis/lib ffmpeg/lib "*.a")
harvest(opus/lib ffmpeg/lib "*.a")
harvest(vpx/lib ffmpeg/lib "*.a")
harvest(x264/lib ffmpeg/lib "*.a")
harvest(xvidcore/lib ffmpeg/lib "*.a")
harvest(aom/lib ffmpeg/lib "*.a")
harvest(webp/lib webp/lib "*.a")
harvest(webp/include webp/include "*.h")
harvest(usd/include usd/include "*.h")
harvest(usd/lib/usd usd/lib/usd "*")
harvest(usd/plugin usd/plugin "*")
harvest(potrace/include potrace/include "*.h")
harvest(potrace/lib potrace/lib "*.a")
harvest(haru/include haru/include "*.h")
harvest(haru/lib haru/lib "*.a")
harvest(zstd/include zstd/include "*.h")
harvest(zstd/lib zstd/lib "*.a")
function(harvest from to)
set(pattern "")
foreach(f ${ARGN})
set(pattern ${f})
endforeach()
if(UNIX AND NOT APPLE)
harvest(libglu/lib mesa/lib "*.so*")
harvest(mesa/lib64 mesa/lib "*.so*")
if(pattern STREQUAL "")
get_filename_component(dirpath ${to} DIRECTORY)
get_filename_component(filename ${to} NAME)
install(
FILES ${LIBDIR}/${from}
DESTINATION ${HARVEST_TARGET}/${dirpath}
RENAME ${filename}
)
else()
install(
DIRECTORY ${LIBDIR}/${from}/
DESTINATION ${HARVEST_TARGET}/${to}
USE_SOURCE_PERMISSIONS
FILES_MATCHING PATTERN ${pattern}
PATTERN "pkgconfig" EXCLUDE
PATTERN "cmake" EXCLUDE
PATTERN "__pycache__" EXCLUDE
PATTERN "tests" EXCLUDE
)
endif()
endfunction()
harvest(alembic/include alembic/include "*.h")
harvest(alembic/lib/libAlembic.a alembic/lib/libAlembic.a)
harvest(alembic/bin alembic/bin "*")
harvest(brotli/include brotli/include "*.h")
harvest(brotli/lib brotli/lib "*.a")
harvest(boost/include boost/include "*")
harvest(boost/lib boost/lib "*.a")
harvest(imath/include imath/include "*.h")
harvest(imath/lib imath/lib "*.a")
harvest(ffmpeg/include ffmpeg/include "*.h")
harvest(ffmpeg/lib ffmpeg/lib "*.a")
harvest(fftw3/include fftw3/include "*.h")
harvest(fftw3/lib fftw3/lib "*.a")
harvest(flac/lib sndfile/lib "libFLAC.a")
harvest(freetype/include freetype/include "*.h")
harvest(freetype/lib/libfreetype2ST.a freetype/lib/libfreetype.a)
harvest(epoxy/include epoxy/include "*.h")
harvest(epoxy/lib epoxy/lib "*.a")
harvest(gmp/include gmp/include "*.h")
harvest(gmp/lib gmp/lib "*.a")
harvest(jemalloc/include jemalloc/include "*.h")
harvest(jemalloc/lib jemalloc/lib "*.a")
harvest(jpeg/include jpeg/include "*.h")
harvest(jpeg/lib jpeg/lib "libjpeg.a")
harvest(lame/lib ffmpeg/lib "*.a")
if(NOT APPLE)
harvest(level-zero/include/level_zero level-zero/include/level_zero "*.h")
harvest(level-zero/lib level-zero/lib "*.so*")
endif()
harvest(llvm/bin llvm/bin "clang-format")
if(BUILD_CLANG_TOOLS)
harvest(llvm/bin llvm/bin "clang-tidy")
harvest(llvm/share/clang llvm/share "run-clang-tidy.py")
endif()
harvest(llvm/include llvm/include "*")
harvest(llvm/bin llvm/bin "llvm-config")
harvest(llvm/lib llvm/lib "libLLVM*.a")
harvest(llvm/lib llvm/lib "libclang*.a")
harvest(llvm/lib/clang llvm/lib/clang "*.h")
if(APPLE)
harvest(openmp/lib openmp/lib "*")
harvest(openmp/include openmp/include "*.h")
endif()
if(BLENDER_PLATFORM_ARM)
harvest(sse2neon sse2neon "*.h")
endif()
harvest(ogg/lib ffmpeg/lib "*.a")
harvest(openal/include openal/include "*.h")
if(UNIX AND NOT APPLE)
harvest(openal/lib openal/lib "*.a")
harvest(blosc/include blosc/include "*.h")
harvest(blosc/lib blosc/lib "*.a")
harvest(zlib/include zlib/include "*.h")
harvest(zlib/lib zlib/lib "*.a")
harvest(xml2/include xml2/include "*.h")
harvest(xml2/lib xml2/lib "*.a")
harvest(wayland-protocols/share/wayland-protocols wayland-protocols/share/wayland-protocols/ "*.xml")
harvest(wayland/bin wayland/bin "wayland-scanner")
else()
harvest(blosc/lib openvdb/lib "*.a")
harvest(xml2/lib opencollada/lib "*.a")
endif()
harvest(opencollada/include/opencollada opencollada/include "*.h")
harvest(opencollada/lib/opencollada opencollada/lib "*.a")
harvest(opencolorio/include opencolorio/include "*.h")
harvest(opencolorio/lib opencolorio/lib "*.a")
harvest(opencolorio/lib/static opencolorio/lib "*.a")
harvest(openexr/include openexr/include "*.h")
harvest(openexr/lib openexr/lib "*.a")
harvest(openimageio/bin openimageio/bin "idiff")
harvest(openimageio/bin openimageio/bin "maketx")
harvest(openimageio/bin openimageio/bin "oiiotool")
harvest(openimageio/include openimageio/include "*")
harvest(openimageio/lib openimageio/lib "*.a")
harvest(openimagedenoise/include openimagedenoise/include "*")
harvest(openimagedenoise/lib openimagedenoise/lib "*.a")
harvest(embree/include embree/include "*.h")
harvest(embree/lib embree/lib "*.a")
harvest(openpgl/include openpgl/include "*.h")
harvest(openpgl/lib openpgl/lib "*.a")
harvest(openpgl/lib/cmake/openpgl-${OPENPGL_SHORT_VERSION} openpgl/lib/cmake/openpgl "*.cmake")
harvest(openjpeg/include/openjpeg-${OPENJPEG_SHORT_VERSION} openjpeg/include "*.h")
harvest(openjpeg/lib openjpeg/lib "*.a")
harvest(opensubdiv/include opensubdiv/include "*.h")
harvest(opensubdiv/lib opensubdiv/lib "*.a")
harvest(openvdb/include/openvdb openvdb/include/openvdb "*.h")
harvest(openvdb/include/nanovdb openvdb/include/nanovdb "*.h")
harvest(openvdb/lib openvdb/lib "*.a")
harvest(xr_openxr_sdk/include/openxr xr_openxr_sdk/include/openxr "*.h")
harvest(xr_openxr_sdk/lib xr_openxr_sdk/lib "*.a")
harvest(osl/bin osl/bin "oslc")
harvest(osl/include osl/include "*.h")
harvest(osl/lib osl/lib "*.a")
harvest(osl/share/OSL/shaders osl/share/OSL/shaders "*.h")
harvest(png/include png/include "*.h")
harvest(png/lib png/lib "*.a")
harvest(pugixml/include pugixml/include "*.hpp")
harvest(pugixml/lib pugixml/lib "*.a")
harvest(python/bin python/bin "python${PYTHON_SHORT_VERSION}")
harvest(python/include python/include "*h")
harvest(python/lib python/lib "*")
harvest(sdl/include/SDL2 sdl/include "*.h")
harvest(sdl/lib sdl/lib "libSDL2.a")
harvest(sndfile/include sndfile/include "*.h")
harvest(sndfile/lib sndfile/lib "*.a")
harvest(spnav/include spnav/include "*.h")
harvest(spnav/lib spnav/lib "*.a")
harvest(tbb/include tbb/include "*.h")
harvest(tbb/lib/libtbb_static.a tbb/lib/libtbb.a)
harvest(theora/lib ffmpeg/lib "*.a")
harvest(tiff/include tiff/include "*.h")
harvest(tiff/lib tiff/lib "*.a")
harvest(vorbis/lib ffmpeg/lib "*.a")
harvest(opus/lib ffmpeg/lib "*.a")
harvest(vpx/lib ffmpeg/lib "*.a")
harvest(x264/lib ffmpeg/lib "*.a")
harvest(xvidcore/lib ffmpeg/lib "*.a")
harvest(aom/lib ffmpeg/lib "*.a")
harvest(webp/lib webp/lib "*.a")
harvest(webp/include webp/include "*.h")
harvest(usd/include usd/include "*.h")
harvest(usd/lib/usd usd/lib/usd "*")
harvest(usd/plugin usd/plugin "*")
harvest(potrace/include potrace/include "*.h")
harvest(potrace/lib potrace/lib "*.a")
harvest(haru/include haru/include "*.h")
harvest(haru/lib haru/lib "*.a")
harvest(zstd/include zstd/include "*.h")
harvest(zstd/lib zstd/lib "*.a")
if(UNIX AND NOT APPLE)
harvest(libglu/lib mesa/lib "*.so*")
harvest(mesa/lib64 mesa/lib "*.so*")
harvest(dpcpp dpcpp "*")
harvest(igc dpcpp/lib/igc "*")
harvest(ocloc dpcpp/lib/ocloc "*")
endif()
harvest(dpcpp dpcpp "*")
harvest(igc dpcpp/lib/igc "*")
harvest(ocloc dpcpp/lib/ocloc "*")
endif()
endif()

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-or-later
if(WIN32)
# CMAKE for MS-Windows.
# cmake for windows
set(JPEG_EXTRA_ARGS
-DNASM=${NASM_PATH}
-DWITH_JPEG8=ON
@@ -33,8 +33,8 @@ if(WIN32)
)
endif()
else()
# CMAKE for UNIX.
else(WIN32)
# cmake for unix
set(JPEG_EXTRA_ARGS
-DWITH_JPEG8=ON
-DENABLE_STATIC=ON

View File

@@ -1,49 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Note the utility apps may use png/tiff/gif system libraries, but the
# library itself does not depend on them, so should give no problems.
set(OPENPGL_EXTRA_ARGS
-DOPENPGL_BUILD_PYTHON=OFF
-DOPENPGL_BUILD_STATIC=ON
-DOPENPGL_TBB_ROOT=${LIBDIR}/tbb
-DTBB_ROOT=${LIBDIR}/tbb
-Dembree_DIR=${LIBDIR}/embree/lib/cmake/embree-${EMBREE_VERSION}
-DCMAKE_DEBUG_POSTFIX=_d
)
if(TBB_STATIC_LIBRARY)
set(OPENPGL_EXTRA_ARGS
${OPENPGL_EXTRA_ARGS}
-DOPENPGL_TBB_COMPONENT=tbb_static
)
endif()
ExternalProject_Add(external_openpgl
URL file://${PACKAGE_DIR}/${OPENPGL_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${OPENPGL_HASH_TYPE}=${OPENPGL_HASH}
PREFIX ${BUILD_DIR}/openpgl
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/openpgl ${DEFAULT_CMAKE_FLAGS} ${OPENPGL_EXTRA_ARGS}
INSTALL_DIR ${LIBDIR}/openpgl
)
add_dependencies(
external_openpgl
external_tbb
external_embree
)
if(WIN32)
if(BUILD_MODE STREQUAL Release)
ExternalProject_Add_Step(external_openpgl after_install
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/openpgl ${HARVEST_TARGET}/openpgl
DEPENDEES install
)
else()
ExternalProject_Add_Step(external_openpgl after_install
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/openpgl/lib/openpgl_d.lib ${HARVEST_TARGET}/openpgl/lib/openpgl_d.lib
DEPENDEES install
)
endif()
endif()

View File

@@ -15,7 +15,7 @@ message("BuildMode = ${BUILD_MODE}")
if(BUILD_MODE STREQUAL "Debug")
set(LIBDIR ${CMAKE_CURRENT_BINARY_DIR}/Debug)
else()
else(BUILD_MODE STREQUAL "Debug")
set(LIBDIR ${CMAKE_CURRENT_BINARY_DIR}/Release)
endif()

View File

@@ -458,12 +458,6 @@ set(WL_PROTOCOLS_URI https://gitlab.freedesktop.org/wayland/wayland-protocols/-/
set(WL_PROTOCOLS_HASH af5ca07e13517cdbab33504492cef54a)
set(WL_PROTOCOLS_HASH_TYPE MD5)
set(WAYLAND_VERSION 1.21.0)
set(WAYLAND_FILE wayland-${WAYLAND_VERSION}.tar.xz)
set(WAYLAND_URI https://gitlab.freedesktop.org/wayland/wayland/-/releases/1.21.0/downloads/wayland-${WAYLAND_VERSION}.tar.xz)
set(WAYLAND_HASH f2653a2293bcd882d756c6a83d278903)
set(WAYLAND_HASH_TYPE MD5)
set(ISPC_VERSION v1.17.0)
set(ISPC_URI https://github.com/ispc/ispc/archive/${ISPC_VERSION}.tar.gz)
set(ISPC_HASH 4f476a3109332a77fe839a9014c60ca9)
@@ -506,13 +500,6 @@ set(BROTLI_HASH f9e8d81d0405ba66d181529af42a3354f838c939095ff99930da6aa9cdf6fe46
set(BROTLI_HASH_TYPE SHA256)
set(BROTLI_FILE brotli-${BROTLI_VERSION}.tar.gz)
set(OPENPGL_VERSION v0.3.1-beta)
set(OPENPGL_SHORT_VERSION 0.3.1)
set(OPENPGL_URI https://github.com/OpenPathGuidingLibrary/openpgl/archive/refs/tags/${OPENPGL_VERSION}.tar.gz)
set(OPENPGL_HASH 3830098c485c962018932766199527aab453a8029528dbbc04d4454d82431e2c)
set(OPENPGL_HASH_TYPE SHA256)
set(OPENPGL_FILE openpgl-${OPENPGL_VERSION}.tar.gz)
set(LEVEL_ZERO_VERSION v1.7.15)
set(LEVEL_ZERO_URI https://github.com/oneapi-src/level-zero/archive/refs/tags/${LEVEL_ZERO_VERSION}.tar.gz)
set(LEVEL_ZERO_HASH c39bb05a8e5898aa6c444e1704105b93d3f1888b9c333f8e7e73825ffbfb2617)

View File

@@ -1,19 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-or-later
ExternalProject_Add(external_wayland
URL file://${PACKAGE_DIR}/${WAYLAND_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${WAYLAND_HASH_TYPE}=${WAYLAND_HASH}
PREFIX ${BUILD_DIR}/wayland
PATCH_COMMAND ${PATCH_CMD} -d ${BUILD_DIR}/wayland/src/external_wayland < ${PATCH_DIR}/wayland.diff
# Use `-E` so the `PKG_CONFIG_PATH` can be defined to link against our own LIBEXPAT.
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E env PKG_CONFIG_PATH=${LIBDIR}/expat/lib/pkgconfig
meson --prefix ${LIBDIR}/wayland -Ddocumentation=false -Dtests=false -Dlibraries=false . ../external_wayland
BUILD_COMMAND ninja
INSTALL_COMMAND ninja install
)
add_dependencies(
external_wayland
external_expat
)

View File

@@ -136,7 +136,7 @@ ARGUMENTS_INFO="\"COMMAND LINE ARGUMENTS:
Build and install the OpenImageDenoise libraries.
--with-nanovdb
Build and install NanoVDB together with OpenVDB.
Build and install the NanoVDB branch of OpenVDB (instead of official release of OpenVDB).
--with-jack
Install the jack libraries.
@@ -385,7 +385,7 @@ CLANG_FORMAT_VERSION="10.0"
CLANG_FORMAT_VERSION_MIN="6.0"
CLANG_FORMAT_VERSION_MEX="14.0"
PYTHON_VERSION="3.10.6"
PYTHON_VERSION="3.10.2"
PYTHON_VERSION_SHORT="3.10"
PYTHON_VERSION_MIN="3.10"
PYTHON_VERSION_MEX="3.12"
@@ -425,7 +425,7 @@ PYTHON_ZSTANDARD_VERSION_MIN="0.15.2"
PYTHON_ZSTANDARD_VERSION_MEX="0.20.0"
PYTHON_ZSTANDARD_NAME="zstandard"
PYTHON_NUMPY_VERSION="1.23.2"
PYTHON_NUMPY_VERSION="1.22.0"
PYTHON_NUMPY_VERSION_MIN="1.14"
PYTHON_NUMPY_VERSION_MEX="2.0"
PYTHON_NUMPY_NAME="numpy"
@@ -453,8 +453,8 @@ PYTHON_MODULES_PIP=(
)
BOOST_VERSION="1.80.0"
BOOST_VERSION_SHORT="1.80"
BOOST_VERSION="1.78.0"
BOOST_VERSION_SHORT="1.78"
BOOST_VERSION_MIN="1.49"
BOOST_VERSION_MEX="2.0"
BOOST_FORCE_BUILD=false
@@ -496,7 +496,7 @@ OPENEXR_FORCE_REBUILD=false
OPENEXR_SKIP=false
_with_built_openexr=false
OIIO_VERSION="2.3.18.0"
OIIO_VERSION="2.3.13.0"
OIIO_VERSION_SHORT="2.3"
OIIO_VERSION_MIN="2.1.12"
OIIO_VERSION_MEX="2.4.0"
@@ -534,10 +534,10 @@ OSD_SKIP=false
# OpenVDB needs to be compiled for now
OPENVDB_BLOSC_VERSION="1.21.1"
OPENVDB_VERSION="9.1.0"
OPENVDB_VERSION_SHORT="9.1"
OPENVDB_VERSION="9.0.0"
OPENVDB_VERSION_SHORT="9.0"
OPENVDB_VERSION_MIN="9.0"
OPENVDB_VERSION_MEX="9.2"
OPENVDB_VERSION_MEX="9.1"
OPENVDB_FORCE_BUILD=false
OPENVDB_FORCE_REBUILD=false
OPENVDB_SKIP=false
@@ -2919,10 +2919,6 @@ compile_OPENVDB() {
cmake_d="$cmake_d -D CMAKE_INSTALL_PREFIX=$_inst"
cmake_d="$cmake_d -D USE_STATIC_DEPENDENCIES=OFF"
cmake_d="$cmake_d -D OPENVDB_BUILD_BINARIES=OFF"
# Unfortunately OpenVDB currently forces using recent oneTBB over older versions when it finds it,
# even when TBB_ROOT is specified. So have to prevent any check for system library -
# in the hope it will not break in some other cases.
cmake_d="$cmake_d -D DISABLE_CMAKE_SEARCH_PATHS=ON"
if [ "$WITH_NANOVDB" = true ]; then
cmake_d="$cmake_d -D USE_NANOVDB=ON"
@@ -2935,6 +2931,7 @@ compile_OPENVDB() {
cmake_d="$cmake_d -D Boost_USE_MULTITHREADED=ON"
cmake_d="$cmake_d -D Boost_NO_SYSTEM_PATHS=ON"
cmake_d="$cmake_d -D Boost_NO_BOOST_CMAKE=ON"
cmake_d="$cmake_d -D Boost_NO_BOOST_CMAKE=ON"
fi
if [ -d $INST/tbb ]; then
cmake_d="$cmake_d -D TBB_ROOT=$INST/tbb"
@@ -3198,7 +3195,7 @@ _init_opencollada() {
_inst_shortcut=$INST/opencollada
}
_update_deps_opencollada() {
_update_deps_collada() {
:
}
@@ -6218,7 +6215,7 @@ print_info() {
fi
if [ -d $INST/nanovdb ]; then
_1="-D WITH_NANOVDB=ON"
_2="-D NANOVDB_ROOT_DIR=$INST/openvdb"
_2="-D NANOVDB_ROOT_DIR=$INST/nanovdb"
PRINT " $_1"
PRINT " $_2"
_buildargs="$_buildargs $_1 $_2"

View File

@@ -14,15 +14,3 @@ index 7b894a45..92618215 100644
)
if(CMAKE_TOOLCHAIN_FILE)
set(pystring_CMAKE_ARGS
--- a/src/OpenColorIO/FileRules.cpp
+++ b/src/OpenColorIO/FileRules.cpp
@@ -7,6 +7,9 @@
#include <regex>
#include <sstream>
+/* NOTE: this has been applied up-stream, this edit can be removed after upgrading OpenColorIO. */
+#include <cstring>
+
#include <OpenColorIO/OpenColorIO.h>
#include "CustomKeys.h"

View File

@@ -1,11 +0,0 @@
--- meson.build.orig 2022-06-30 22:59:11.000000000 +0100
+++ meson.build 2022-09-27 13:21:26.428517668 +0100
@@ -2,7 +2,7 @@
'wayland', 'c',
version: '1.21.0',
license: 'MIT',
- meson_version: '>= 0.56.0',
+ meson_version: '>= 0.55.1',
default_options: [
'warning_level=2',
'buildtype=debugoptimized',

View File

@@ -34,17 +34,11 @@ SET(PYTHON_VERSION 3.10 CACHE STRING "Python Version (major and minor only)")
MARK_AS_ADVANCED(PYTHON_VERSION)
if(APPLE)
if(WITH_PYTHON_MODULE)
set(PYTHON_LINKFLAGS "-undefined dynamic_lookup")
else()
set(PYTHON_LINKFLAGS)
endif()
else()
# See: http://docs.python.org/extending/embedding.html#linking-requirements
SET(PYTHON_LINKFLAGS "-Xlinker -export-dynamic" CACHE STRING "Linker flags for python")
MARK_AS_ADVANCED(PYTHON_LINKFLAGS)
endif()
# See: http://docs.python.org/extending/embedding.html#linking-requirements
# for why this is needed
SET(PYTHON_LINKFLAGS "-Xlinker -export-dynamic" CACHE STRING "Linker flags for python")
MARK_AS_ADVANCED(PYTHON_LINKFLAGS)
# if the user passes these defines as args, we don't want to overwrite
SET(_IS_INC_DEF OFF)

View File

@@ -44,7 +44,7 @@ FIND_PROGRAM(SYCL_COMPILER
# compiler.
if(NOT SYCL_COMPILER)
FIND_PROGRAM(SYCL_COMPILER
NAMES
NAMES
dpcpp
HINTS
${_sycl_search_dirs}

View File

@@ -268,8 +268,7 @@ same as the Google Test name (i.e. ``suite.testcase``); see also
cmake_policy(PUSH)
cmake_policy(SET CMP0057 NEW) # if IN_LIST
# -----------------------------------------------------------------------------
#------------------------------------------------------------------------------
function(gtest_add_tests)
if(ARGC LESS 1)

View File

@@ -40,10 +40,12 @@ macro(BLENDER_SRC_GTEST_EX)
set(MANIFEST "${CMAKE_BINARY_DIR}/tests.exe.manifest")
endif()
add_definitions(-DBLENDER_GFLAGS_NAMESPACE=${GFLAGS_NAMESPACE})
add_definitions(${GFLAGS_DEFINES})
add_definitions(${GLOG_DEFINES})
add_executable(${TARGET_NAME} ${ARG_SRC} ${MANIFEST})
setup_platform_linker_flags(${TARGET_NAME})
target_compile_definitions(${TARGET_NAME} PRIVATE ${GFLAGS_DEFINES})
target_compile_definitions(${TARGET_NAME} PRIVATE ${GLOG_DEFINES})
target_include_directories(${TARGET_NAME} PUBLIC "${TEST_INC}")
target_include_directories(${TARGET_NAME} SYSTEM PUBLIC "${TEST_INC_SYS}")
target_link_libraries(${TARGET_NAME} ${ARG_EXTRA_LIBS} ${PLATFORM_LINKLIBS})
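This hunk trades directory-scoped add_definitions() for target-scoped target_compile_definitions() (or the reverse; the +/- markers are not visible here). A minimal illustration of the difference, with a made-up flag name:
add_definitions(-DEXAMPLE_FLAG)                                   # affects every target declared after this point
target_compile_definitions(${TARGET_NAME} PRIVATE EXAMPLE_FLAG)   # affects only this one target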

View File

@@ -150,10 +150,10 @@ endif()
# BUILD_PLATFORM is taken from CMake
# but BUILD_DATE and BUILD_TIME are platform dependent
if(NOT BUILD_DATE)
string(TIMESTAMP BUILD_DATE "%Y-%m-%d" UTC)
STRING(TIMESTAMP BUILD_DATE "%Y-%m-%d" UTC)
endif()
if(NOT BUILD_TIME)
string(TIMESTAMP BUILD_TIME "%H:%M:%S" UTC)
STRING(TIMESTAMP BUILD_TIME "%H:%M:%S" UTC)
endif()
# Write a file with the BUILD_HASH define

View File

@@ -17,7 +17,6 @@ set(WITH_COMPOSITOR_CPU ON CACHE BOOL "" FORCE)
set(WITH_CYCLES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_EMBREE ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_OSL ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_PATH_GUIDING ON CACHE BOOL "" FORCE)
set(WITH_DRACO ON CACHE BOOL "" FORCE)
set(WITH_FFTW3 ON CACHE BOOL "" FORCE)
set(WITH_FREESTYLE ON CACHE BOOL "" FORCE)

View File

@@ -7,6 +7,8 @@
# cmake -C../blender/build_files/cmake/config/blender_lite.cmake ../blender
#
set(WITH_INSTALL_PORTABLE ON CACHE BOOL "" FORCE)
set(WITH_ALEMBIC OFF CACHE BOOL "" FORCE)
set(WITH_AUDASPACE OFF CACHE BOOL "" FORCE)
set(WITH_BLENDER_THUMBNAILER OFF CACHE BOOL "" FORCE)

View File

@@ -18,7 +18,6 @@ set(WITH_COMPOSITOR_CPU ON CACHE BOOL "" FORCE)
set(WITH_CYCLES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_EMBREE ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_OSL ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_PATH_GUIDING ON CACHE BOOL "" FORCE)
set(WITH_DRACO ON CACHE BOOL "" FORCE)
set(WITH_FFTW3 ON CACHE BOOL "" FORCE)
set(WITH_FREESTYLE ON CACHE BOOL "" FORCE)

View File

@@ -8,81 +8,41 @@
set(WITH_PYTHON_MODULE ON CACHE BOOL "" FORCE)
# install into the systems python dir
set(WITH_INSTALL_PORTABLE OFF CACHE BOOL "" FORCE)
# -----------------------------------------------------------------------------
# Installation Configuration.
#
# NOTE: `WITH_INSTALL_PORTABLE` always defaults to ON when building as a Python module and
# isn't set here as it makes changing the setting impractical.
# Python-developers could prefer either ON/OFF depending on their usage:
#
# - When using the system's Python, disabling will install into their `site-packages`,
# allowing them to run Python from any directory and `import bpy`.
# - When using Blender's bundled Python in `./../lib/` it will install there
# which isn't especially useful as it requires running Python from this directory too.
#
# So default `WITH_INSTALL_PORTABLE` to ON, and developers who don't use Python from `./../lib/`
# can disable it if they wish to install into their systems Python.
# There is no point in copying python into Python.
# no point int copying python into python
set(WITH_PYTHON_INSTALL OFF CACHE BOOL "" FORCE)
# disable audio, its possible some devs may want this but for now disable
# so the python module doesn't hold the audio device and loads quickly.
set(WITH_AUDASPACE OFF CACHE BOOL "" FORCE)
set(WITH_CODEC_FFMPEG OFF CACHE BOOL "" FORCE)
set(WITH_CODEC_SNDFILE OFF CACHE BOOL "" FORCE)
set(WITH_COREAUDIO OFF CACHE BOOL "" FORCE)
set(WITH_JACK OFF CACHE BOOL "" FORCE)
set(WITH_OPENAL OFF CACHE BOOL "" FORCE)
set(WITH_PULSEAUDIO OFF CACHE BOOL "" FORCE)
set(WITH_SDL OFF CACHE BOOL "" FORCE)
set(WITH_WASAPI OFF CACHE BOOL "" FORCE)
# other features which are not especially useful as a python module
set(WITH_ALEMBIC OFF CACHE BOOL "" FORCE)
set(WITH_BULLET OFF CACHE BOOL "" FORCE)
set(WITH_INPUT_NDOF OFF CACHE BOOL "" FORCE)
set(WITH_INTERNATIONAL OFF CACHE BOOL "" FORCE)
set(WITH_NANOVDB OFF CACHE BOOL "" FORCE)
set(WITH_OPENCOLLADA OFF CACHE BOOL "" FORCE)
set(WITH_OPENVDB OFF CACHE BOOL "" FORCE)
set(WITH_X11_XINPUT OFF CACHE BOOL "" FORCE)
# Depends on Python install, do this to quiet warning.
set(WITH_DRACO OFF CACHE BOOL "" FORCE)
if(WIN32)
set(WITH_WINDOWS_BUNDLE_CRT OFF CACHE BOOL "" FORCE)
endif()
# -----------------------------------------------------------------------------
# Library Compatibility.
# JEMALLOC does not work with `dlopen()` of Python modules:
# Jemalloc does not work with dlopen() of Python modules:
# https://github.com/jemalloc/jemalloc/issues/1237
set(WITH_MEM_JEMALLOC OFF CACHE BOOL "" FORCE)
# -----------------------------------------------------------------------------
# Application Support.
# Not useful to include with the Python module.
# Although a way to extract this from Python could be handle,
# this would be better exposed directly via the Python API.
set(WITH_BLENDER_THUMBNAILER OFF CACHE BOOL "" FORCE)
# -----------------------------------------------------------------------------
# Audio Support.
# Disable audio, its possible some developers may want this but for now disable
# so the Python module doesn't hold the audio device and loads quickly.
set(WITH_AUDASPACE OFF CACHE BOOL "" FORCE)
set(WITH_JACK OFF CACHE BOOL "" FORCE)
set(WITH_OPENAL OFF CACHE BOOL "" FORCE)
set(WITH_SDL OFF CACHE BOOL "" FORCE)
if(UNIX AND NOT APPLE)
set(WITH_PULSEAUDIO OFF CACHE BOOL "" FORCE)
endif()
if(WIN32)
set(WITH_WASAPI OFF CACHE BOOL "" FORCE)
set(WITH_WINDOWS_BUNDLE_CRT OFF CACHE BOOL "" FORCE)
endif()
if(APPLE)
set(WITH_COREAUDIO OFF CACHE BOOL "" FORCE)
endif()
# -----------------------------------------------------------------------------
# Input Device Support.
# Other features which are not especially useful as a python module.
set(WITH_INPUT_NDOF OFF CACHE BOOL "" FORCE)
if(WIN32 OR APPLE)
set(WITH_INPUT_IME OFF CACHE BOOL "" FORCE)
endif()
# -----------------------------------------------------------------------------
# Language Support.
set(WITH_INTERNATIONAL OFF CACHE BOOL "" FORCE)

View File

@@ -134,11 +134,12 @@ endfunction()
# Nicer makefiles with -I/1/foo/ instead of -I/1/2/3/../../foo/
# use it instead of include_directories()
function(absolute_include_dirs
includes_absolute)
function(blender_include_dirs
includes
)
set(_ALL_INCS "")
foreach(_INC ${ARGN})
foreach(_INC ${ARGV})
get_filename_component(_ABS_INC ${_INC} ABSOLUTE)
list(APPEND _ALL_INCS ${_ABS_INC})
# for checking for invalid includes, disable for regular use
@@ -146,24 +147,22 @@ function(absolute_include_dirs
# message(FATAL_ERROR "Include not found: ${_ABS_INC}/")
# endif()
endforeach()
set(${includes_absolute} ${_ALL_INCS} PARENT_SCOPE)
include_directories(${_ALL_INCS})
endfunction()
function(blender_target_include_dirs
name
function(blender_include_dirs_sys
includes
)
absolute_include_dirs(_ALL_INCS ${ARGN})
target_include_directories(${name} PRIVATE ${_ALL_INCS})
endfunction()
function(blender_target_include_dirs_sys
name
)
absolute_include_dirs(_ALL_INCS ${ARGN})
target_include_directories(${name} SYSTEM PRIVATE ${_ALL_INCS})
set(_ALL_INCS "")
foreach(_INC ${ARGV})
get_filename_component(_ABS_INC ${_INC} ABSOLUTE)
list(APPEND _ALL_INCS ${_ABS_INC})
# if(NOT EXISTS "${_ABS_INC}/")
# message(FATAL_ERROR "Include not found: ${_ABS_INC}/")
# endif()
endforeach()
include_directories(SYSTEM ${_ALL_INCS})
endfunction()
# Set include paths for header files included with "*.h" syntax.
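A hypothetical call site (target name, paths and the TBB variable are placeholders, not from the diff) showing how the per-target helpers differ from the directory-wide blender_include_dirs() variants:
add_library(bf_example STATIC example.cc)
blender_target_include_dirs(bf_example ../blenlib ../makesdna)     # private include paths for this target
blender_target_include_dirs_sys(bf_example ${TBB_INCLUDE_DIRS})    # marked SYSTEM, so their warnings are suppressed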
@@ -269,10 +268,12 @@ function(blender_add_lib__impl
# message(STATUS "Configuring library ${name}")
add_library(${name} ${sources})
# include_directories(${includes})
# include_directories(SYSTEM ${includes_sys})
blender_include_dirs("${includes}")
blender_include_dirs_sys("${includes_sys}")
blender_target_include_dirs(${name} ${includes})
blender_target_include_dirs_sys(${name} ${includes_sys})
add_library(${name} ${sources})
# On Windows certain libraries have two sets of binaries: one for debug builds and one for
# release builds. The root of this requirement goes into ABI, I believe, but that's outside
@@ -381,7 +382,7 @@ function(blender_add_test_suite)
cmake_parse_arguments(ARGS "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
# Figure out the release dir, as some tests need files from there.
get_blender_test_install_dir(TEST_INSTALL_DIR)
GET_BLENDER_TEST_INSTALL_DIR(TEST_INSTALL_DIR)
if(APPLE)
set(_test_release_dir ${TEST_INSTALL_DIR}/Blender.app/Contents/Resources/${BLENDER_VERSION})
else()
@@ -424,21 +425,21 @@ function(blender_add_test_lib
# This duplicates logic that's also in GTestTesting.cmake, macro BLENDER_SRC_GTEST_EX.
# TODO(Sybren): deduplicate after the general approach in D7649 has been approved.
list(APPEND includes
LIST(APPEND includes
${CMAKE_SOURCE_DIR}/tests/gtests
)
list(APPEND includes_sys
LIST(APPEND includes_sys
${GLOG_INCLUDE_DIRS}
${GFLAGS_INCLUDE_DIRS}
${CMAKE_SOURCE_DIR}/extern/gtest/include
${CMAKE_SOURCE_DIR}/extern/gmock/include
)
add_definitions(-DBLENDER_GFLAGS_NAMESPACE=${GFLAGS_NAMESPACE})
add_definitions(${GFLAGS_DEFINES})
add_definitions(${GLOG_DEFINES})
blender_add_lib__impl(${name} "${sources}" "${includes}" "${includes_sys}" "${library_deps}")
target_compile_definitions(${name} PRIVATE ${GFLAGS_DEFINES})
target_compile_definitions(${name} PRIVATE ${GLOG_DEFINES})
set_property(GLOBAL APPEND PROPERTY BLENDER_TEST_LIBS ${name})
blender_add_test_suite(
@@ -468,16 +469,16 @@ function(blender_add_test_executable
## Otherwise external projects will produce warnings that we cannot fix.
remove_strict_flags()
blender_src_gtest_ex(
include_directories(${includes})
include_directories(${includes_sys})
BLENDER_SRC_GTEST_EX(
NAME ${name}
SRC "${sources}"
EXTRA_LIBS "${library_deps}"
SKIP_ADD_TEST
)
blender_target_include_dirs(${name}_test ${includes})
blender_target_include_dirs_sys(${name}_test ${includes_sys})
blender_add_test_suite(
TARGET ${name}_test
SUITE_NAME ${name}
@@ -512,11 +513,6 @@ function(setup_platform_linker_flags
set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS " ${PLATFORM_LINKFLAGS}")
set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS_RELEASE " ${PLATFORM_LINKFLAGS_RELEASE}")
set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS_DEBUG " ${PLATFORM_LINKFLAGS_DEBUG}")
get_target_property(target_type ${target} TYPE)
if (target_type STREQUAL "EXECUTABLE")
set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS " ${PLATFORM_LINKFLAGS_EXECUTABLE}")
endif()
endfunction()
# Platform specific libraries for targets.
@@ -764,7 +760,7 @@ function(ADD_CHECK_C_COMPILER_FLAG
include(CheckCCompilerFlag)
check_c_compiler_flag("${_FLAG}" "${_CACHE_VAR}")
CHECK_C_COMPILER_FLAG("${_FLAG}" "${_CACHE_VAR}")
if(${_CACHE_VAR})
# message(STATUS "Using CFLAG: ${_FLAG}")
set(${_CFLAGS} "${${_CFLAGS}} ${_FLAG}" PARENT_SCOPE)
@@ -781,7 +777,7 @@ function(ADD_CHECK_CXX_COMPILER_FLAG
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag("${_FLAG}" "${_CACHE_VAR}")
CHECK_CXX_COMPILER_FLAG("${_FLAG}" "${_CACHE_VAR}")
if(${_CACHE_VAR})
# message(STATUS "Using CXXFLAG: ${_FLAG}")
set(${_CXXFLAGS} "${${_CXXFLAGS}} ${_FLAG}" PARENT_SCOPE)
@@ -799,11 +795,9 @@ function(get_blender_version)
# - BLENDER_VERSION_PATCH
# - BLENDER_VERSION_CYCLE (alpha, beta, rc, release)
# So CMAKE depends on `BKE_blender.h`, beware of infinite-loops!
configure_file(
${CMAKE_SOURCE_DIR}/source/blender/blenkernel/BKE_blender_version.h
${CMAKE_BINARY_DIR}/source/blender/blenkernel/BKE_blender_version.h.done
)
# So cmake depends on BKE_blender.h, beware of inf-loops!
CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/source/blender/blenkernel/BKE_blender_version.h
${CMAKE_BINARY_DIR}/source/blender/blenkernel/BKE_blender_version.h.done)
file(STRINGS ${CMAKE_SOURCE_DIR}/source/blender/blenkernel/BKE_blender_version.h _contents REGEX "^#define[ \t]+BLENDER_.*$")
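A sketch (assuming the version is encoded as major*100+minor, e.g. 304 -> 3.4; not shown in this hunk) of how the matched #define lines could then be split into some of the variables the comment promises:
foreach(_line ${_contents})
  if(_line MATCHES "#define[ \t]+BLENDER_VERSION[ \t]+([0-9]+)")
    math(EXPR BLENDER_VERSION_MAJOR "${CMAKE_MATCH_1} / 100")
    math(EXPR BLENDER_VERSION_MINOR "${CMAKE_MATCH_1} % 100")
  endif()
endforeach()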
@@ -1190,6 +1184,8 @@ macro(openmp_delayload
if(WITH_OPENMP)
if(MSVC_CLANG)
set(OPENMP_DLL_NAME "libomp")
elseif(MSVC_VERSION EQUAL 1800)
set(OPENMP_DLL_NAME "vcomp120")
else()
set(OPENMP_DLL_NAME "vcomp140")
endif()
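One plausible way the selected DLL name is consumed later in the macro (not visible in this hunk; the exact property and target are assumptions) is delay-loading it on the executable:
set_property(TARGET blender APPEND_STRING PROPERTY
  LINK_FLAGS_RELEASE " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")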
@@ -1212,8 +1208,16 @@ endmacro()
macro(without_system_libs_begin)
set(CMAKE_IGNORE_PATH "${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES};${CMAKE_SYSTEM_INCLUDE_PATH};${CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES};${CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES}")
if(APPLE)
# Avoid searching for headers in frameworks (like Mono), and libraries in LIBDIR.
set(CMAKE_FIND_FRAMEWORK NEVER)
endif()
endmacro()
macro(without_system_libs_end)
unset(CMAKE_IGNORE_PATH)
if(APPLE)
# FIRST is the default.
set(CMAKE_FIND_FRAMEWORK FIRST)
endif()
endmacro()
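A hypothetical usage sketch of the pair of macros above (ZLIB is only a placeholder package): wrap a find_package() call so only the pre-compiled libraries can satisfy it, then restore the defaults:
without_system_libs_begin()
find_package(ZLIB REQUIRED)   # resolved from the bundled LIBDIR; system paths are ignored
without_system_libs_end()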

View File

@@ -17,9 +17,9 @@ set(CPACK_PACKAGE_VENDOR ${PROJECT_VENDOR})
set(CPACK_PACKAGE_CONTACT ${PROJECT_CONTACT})
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_SOURCE_DIR}/COPYING")
set(CPACK_PACKAGE_INSTALL_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
set(CPACK_PACKAGE_VERSION_MAJOR "${MAJOR_VERSION}")
set(CPACK_PACKAGE_VERSION_MINOR "${MINOR_VERSION}")
set(CPACK_PACKAGE_VERSION_PATCH "${PATCH_VERSION}")
SET(CPACK_PACKAGE_VERSION_MAJOR "${MAJOR_VERSION}")
SET(CPACK_PACKAGE_VERSION_MINOR "${MINOR_VERSION}")
SET(CPACK_PACKAGE_VERSION_PATCH "${PATCH_VERSION}")
# Get the build revision, note that this can get out-of-sync, so for packaging run cmake first.
@@ -48,7 +48,7 @@ if(MSVC)
else()
set(PACKAGE_ARCH windows32)
endif()
else()
else(MSVC)
set(PACKAGE_ARCH ${CMAKE_SYSTEM_PROCESSOR})
endif()

View File

@@ -30,7 +30,7 @@ macro(add_bundled_libraries library)
list(APPEND PLATFORM_BUNDLED_LIBRARY_DIRS ${_library_dir})
unset(_all_library_versions)
unset(_library_dir)
endif()
endif()
endmacro()
# ------------------------------------------------------------------------
@@ -75,15 +75,6 @@ if(NOT EXISTS "${LIBDIR}/")
message(FATAL_ERROR "Mac OSX requires pre-compiled libs at: '${LIBDIR}'")
endif()
# Avoid searching for headers since this would otherwise override our lib
# directory as well as PYTHON_ROOT_DIR.
set(CMAKE_FIND_FRAMEWORK NEVER)
# Optionally use system Python if PYTHON_ROOT_DIR is specified.
if(WITH_PYTHON AND (WITH_PYTHON_MODULE AND PYTHON_ROOT_DIR))
find_package(PythonLibsUnix REQUIRED)
endif()
# Prefer lib directory paths
file(GLOB LIB_SUBDIRS ${LIBDIR}/*)
set(CMAKE_PREFIX_PATH ${LIB_SUBDIRS})
@@ -132,8 +123,34 @@ if(WITH_CODEC_SNDFILE)
unset(_sndfile_VORBISENC_LIBRARY)
endif()
if(WITH_PYTHON AND NOT (WITH_PYTHON_MODULE AND PYTHON_ROOT_DIR))
find_package(PythonLibsUnix REQUIRED)
if(WITH_PYTHON)
# Use precompiled libraries by default.
set(PYTHON_VERSION 3.10)
if(NOT WITH_PYTHON_MODULE AND NOT WITH_PYTHON_FRAMEWORK)
# Normally cached but not since we include them with blender.
set(PYTHON_INCLUDE_DIR "${LIBDIR}/python/include/python${PYTHON_VERSION}")
set(PYTHON_EXECUTABLE "${LIBDIR}/python/bin/python${PYTHON_VERSION}")
set(PYTHON_LIBRARY ${LIBDIR}/python/lib/libpython${PYTHON_VERSION}.a)
set(PYTHON_LIBPATH "${LIBDIR}/python/lib/python${PYTHON_VERSION}")
else()
# Module must be compiled against Python framework.
set(_py_framework "/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}")
set(PYTHON_INCLUDE_DIR "${_py_framework}/include/python${PYTHON_VERSION}")
set(PYTHON_EXECUTABLE "${_py_framework}/bin/python${PYTHON_VERSION}")
set(PYTHON_LIBPATH "${_py_framework}/lib/python${PYTHON_VERSION}")
unset(_py_framework)
endif()
# uncached vars
set(PYTHON_INCLUDE_DIRS "${PYTHON_INCLUDE_DIR}")
set(PYTHON_LIBRARIES "${PYTHON_LIBRARY}")
# needed for Audaspace, numpy is installed into python site-packages
set(PYTHON_NUMPY_INCLUDE_DIRS "${PYTHON_LIBPATH}/site-packages/numpy/core/include")
if(NOT EXISTS "${PYTHON_EXECUTABLE}")
message(FATAL_ERROR "Python executable missing: ${PYTHON_EXECUTABLE}")
endif()
endif()
if(WITH_FFTW3)
@@ -196,6 +213,11 @@ if(WITH_JACK)
string(APPEND PLATFORM_LINKFLAGS " -F/Library/Frameworks -weak_framework jackmp")
endif()
if(WITH_PYTHON_MODULE OR WITH_PYTHON_FRAMEWORK)
# force cmake to link right framework
string(APPEND PLATFORM_LINKFLAGS " /Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/Python")
endif()
if(WITH_OPENCOLLADA)
find_package(OpenCOLLADA)
find_library(PCRE_LIBRARIES NAMES pcre HINTS ${LIBDIR}/opencollada/lib)
@@ -324,7 +346,7 @@ if(WITH_LLVM)
if(WITH_CLANG)
find_package(Clang)
if(NOT CLANG_FOUND)
message(FATAL_ERROR "Clang not found.")
message(FATAL_ERROR "Clang not found.")
endif()
endif()
@@ -352,6 +374,10 @@ endif()
if(WITH_CYCLES AND WITH_CYCLES_EMBREE)
find_package(Embree 3.8.0 REQUIRED)
# Increase stack size for Embree, only works for executables.
if(NOT WITH_PYTHON_MODULE)
string(APPEND PLATFORM_LINKFLAGS " -Wl,-stack_size,0x100000")
endif()
# Embree static library linking can mix up SSE and AVX symbols, causing
# crashes on macOS systems with older CPUs that don't have AVX. Using
@@ -436,9 +462,6 @@ if(EXISTS ${LIBDIR})
without_system_libs_end()
endif()
# Restore to default.
set(CMAKE_FIND_FRAMEWORK FIRST)
# ---------------------------------------------------------------------
# Set compiler and linker flags.
@@ -471,9 +494,6 @@ string(APPEND PLATFORM_LINKFLAGS
string(APPEND CMAKE_CXX_FLAGS " -stdlib=libc++")
string(APPEND PLATFORM_LINKFLAGS " -stdlib=libc++")
# Make stack size more similar to Embree, required for Embree.
string(APPEND PLATFORM_LINKFLAGS_EXECUTABLE " -Wl,-stack_size,0x100000")
# Suppress ranlib "has no symbols" warnings (workaround for T48250)
set(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")

View File

@@ -16,15 +16,13 @@ if(NOT DEFINED LIBDIR)
# Choose the best suitable libraries.
if(EXISTS ${LIBDIR_NATIVE_ABI})
set(LIBDIR ${LIBDIR_NATIVE_ABI})
set(WITH_LIBC_MALLOC_HOOK_WORKAROUND True)
elseif(EXISTS ${LIBDIR_CENTOS7_ABI})
set(LIBDIR ${LIBDIR_CENTOS7_ABI})
set(WITH_CXX11_ABI OFF)
if(WITH_MEM_JEMALLOC)
# jemalloc provides malloc hooks.
set(WITH_LIBC_MALLOC_HOOK_WORKAROUND False)
else()
set(WITH_LIBC_MALLOC_HOOK_WORKAROUND True)
if(CMAKE_COMPILER_IS_GNUCC AND
CMAKE_C_COMPILER_VERSION VERSION_LESS 9.3)
message(FATAL_ERROR "GCC version must be at least 9.3 for precompiled libraries, found ${CMAKE_C_COMPILER_VERSION}")
endif()
endif()
@@ -89,7 +87,7 @@ macro(add_bundled_libraries library)
file(GLOB _all_library_versions ${LIBDIR}/${library}/lib/*\.so*)
list(APPEND PLATFORM_BUNDLED_LIBRARIES ${_all_library_versions})
unset(_all_library_versions)
endif()
endif()
endmacro()
# ----------------------------------------------------------------------------
@@ -138,38 +136,14 @@ if(NOT WITH_SYSTEM_FREETYPE)
endif()
if(WITH_PYTHON)
# This could be used, see: D14954 for details.
# `find_package(PythonLibs)`
# No way to set py35, remove for now.
# find_package(PythonLibs)
# Use our own instead, since without Python is such a rare case,
# require this package.
# XXX: Linking errors with Debian static Python (sigh).
# find_package_wrapper(PythonLibsUnix REQUIRED)
# Use our own instead, since without py is such a rare case,
# require this package
# XXX Linking errors with debian static python :/
# find_package_wrapper(PythonLibsUnix REQUIRED)
find_package(PythonLibsUnix REQUIRED)
if(WITH_PYTHON_MODULE AND NOT WITH_INSTALL_PORTABLE)
# Installing into `site-packages`, warn when installing into `./../lib/`
# which script authors almost certainly don't want.
if(EXISTS ${LIBDIR})
cmake_path(IS_PREFIX LIBDIR "${PYTHON_SITE_PACKAGES}" NORMALIZE _is_prefix)
if(_is_prefix)
message(WARNING "
Building Blender with the following configuration:
- WITH_PYTHON_MODULE=ON
- WITH_INSTALL_PORTABLE=OFF
- LIBDIR=\"${LIBDIR}\"
- PYTHON_SITE_PACKAGES=\"${PYTHON_SITE_PACKAGES}\"
In this case you may want to either:
- Use the system Python's site-packages, see:
python -c \"import site; print(site.getsitepackages()[0])\"
- Set WITH_INSTALL_PORTABLE=ON to create a stand-alone \"bpy\" module
which you will need to ensure is in Python's module search path.
Proceeding with PYTHON_SITE_PACKAGES install target, you have been warned!"
)
endif()
unset(_is_prefix)
endif()
endif()
endif()
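PYTHON_SITE_PACKAGES presumably comes from the PythonLibsUnix module; an equivalent stand-alone lookup, mirroring the python one-liner quoted in the warning above (a sketch, not part of the diff), would be:
execute_process(
  COMMAND ${PYTHON_EXECUTABLE} -c "import site; print(site.getsitepackages()[0])"
  OUTPUT_VARIABLE PYTHON_SITE_PACKAGES
  OUTPUT_STRIP_TRAILING_WHITESPACE
)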
if(WITH_IMAGE_OPENEXR)
@@ -764,44 +738,7 @@ if(WITH_GHOST_WAYLAND)
add_definitions(-DWITH_GHOST_WAYLAND_LIBDECOR)
endif()
if(EXISTS "${LIBDIR}/wayland/bin/wayland-scanner")
set(WAYLAND_SCANNER "${LIBDIR}/wayland/bin/wayland-scanner")
else()
pkg_get_variable(WAYLAND_SCANNER wayland-scanner wayland_scanner)
endif()
# When using dynamic loading, headers generated
# from older versions of `wayland-scanner` aren't compatible.
if(WITH_GHOST_WAYLAND_DYNLOAD)
execute_process(
COMMAND ${WAYLAND_SCANNER} --version
# The version is written to the `stderr`.
ERROR_VARIABLE _wayland_scanner_out
ERROR_STRIP_TRAILING_WHITESPACE
)
if(NOT "${_wayland_scanner_out}" STREQUAL "")
string(
REGEX REPLACE
"^wayland-scanner[ \t]+([0-9]+)\.([0-9]+).*"
"\\1.\\2"
_wayland_scanner_ver
"${_wayland_scanner_out}"
)
if("${_wayland_scanner_ver}" VERSION_LESS "1.20")
message(
FATAL_ERROR
"Found ${WAYLAND_SCANNER} version \"${_wayland_scanner_ver}\", "
"the minimum version is 1.20!"
)
endif()
unset(_wayland_scanner_ver)
else()
message(WARNING "Unable to access the version from ${WAYLAND_SCANNER}, continuing.")
endif()
unset(_wayland_scanner_out)
endif()
# End wayland-scanner version check.
pkg_get_variable(WAYLAND_SCANNER wayland-scanner wayland_scanner)
endif()
endif()
@@ -1088,7 +1025,7 @@ function(CONFIGURE_ATOMIC_LIB_IF_NEEDED)
endif()
endfunction()
configure_atomic_lib_if_needed()
CONFIGURE_ATOMIC_LIB_IF_NEEDED()
if(PLATFORM_BUNDLED_LIBRARIES)
# For the installed Python module and installed Blender executable, we set the

View File

@@ -26,7 +26,7 @@ if(CMAKE_C_COMPILER_ID MATCHES "Clang")
set(OPENMP_FOUND ON)
set(OpenMP_C_FLAGS "/clang:-fopenmp")
set(OpenMP_CXX_FLAGS "/clang:-fopenmp")
get_filename_component(LLVMROOT "[HKEY_LOCAL_MACHINE\\SOFTWARE\\WOW6432Node\\LLVM\\LLVM;]" ABSOLUTE CACHE)
GET_FILENAME_COMPONENT(LLVMROOT "[HKEY_LOCAL_MACHINE\\SOFTWARE\\WOW6432Node\\LLVM\\LLVM;]" ABSOLUTE CACHE)
set(CLANG_OPENMP_DLL "${LLVMROOT}/bin/libomp.dll")
set(CLANG_OPENMP_LIB "${LLVMROOT}/lib/libomp.lib")
if(NOT EXISTS "${CLANG_OPENMP_DLL}")
@@ -74,6 +74,27 @@ add_definitions(-DWIN32)
add_compile_options("$<$<C_COMPILER_ID:MSVC>:/utf-8>")
add_compile_options("$<$<CXX_COMPILER_ID:MSVC>:/utf-8>")
# Minimum MSVC Version
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
if(MSVC_VERSION EQUAL 1800)
set(_min_ver "18.0.31101")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${_min_ver})
message(FATAL_ERROR
"Visual Studio 2013 (Update 4, ${_min_ver}) required, "
"found (${CMAKE_CXX_COMPILER_VERSION})")
endif()
endif()
if(MSVC_VERSION EQUAL 1900)
set(_min_ver "19.0.24210")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${_min_ver})
message(FATAL_ERROR
"Visual Studio 2015 (Update 3, ${_min_ver}) required, "
"found (${CMAKE_CXX_COMPILER_VERSION})")
endif()
endif()
endif()
unset(_min_ver)
# needed for some MSVC installations
# 4099 : PDB 'filename' was not found with 'object/library'
string(APPEND CMAKE_EXE_LINKER_FLAGS " /SAFESEH:NO /ignore:4099")
@@ -137,7 +158,7 @@ endif()
# C++ standards conformace (/permissive-) is available on msvc 15.5 (1912) and up
if(NOT MSVC_CLANG)
if(MSVC_VERSION GREATER 1911 AND NOT MSVC_CLANG)
string(APPEND CMAKE_CXX_FLAGS " /permissive-")
# Two-phase name lookup does not place nicely with OpenMP yet, so disable for now
string(APPEND CMAKE_CXX_FLAGS " /Zc:twoPhase-")
@@ -197,7 +218,7 @@ unset(SYMBOL_FORMAT)
unset(SYMBOL_FORMAT_RELEASE)
# JMC is available on msvc 15.8 (1915) and up
if(NOT MSVC_CLANG)
if(MSVC_VERSION GREATER 1914 AND NOT MSVC_CLANG)
string(APPEND CMAKE_CXX_FLAGS_DEBUG " /JMC")
endif()
@@ -230,6 +251,9 @@ if(NOT DEFINED LIBDIR)
elseif(MSVC_VERSION GREATER 1919)
message(STATUS "Visual Studio 2019 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
elseif(MSVC_VERSION GREATER 1909)
message(STATUS "Visual Studio 2017 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
endif()
else()
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
@@ -240,8 +264,10 @@ endif()
include(platform_old_libs_update)
# Only supported in the VS IDE & Clang Tidy needs to be on.
if(CMAKE_GENERATOR MATCHES "^Visual Studio.+" AND WITH_CLANG_TIDY)
if(CMAKE_GENERATOR MATCHES "^Visual Studio.+" AND # Only supported in the VS IDE
MSVC_VERSION GREATER_EQUAL 1924 AND # Supported for 16.4+
WITH_CLANG_TIDY # And Clang Tidy needs to be on
)
set(CMAKE_VS_GLOBALS
"RunCodeAnalysis=false"
"EnableMicrosoftCodeAnalysis=false"
@@ -252,7 +278,8 @@ endif()
# Mark libdir as system headers with a lower warn level, to resolve some warnings
# that we have very little control over
if(NOT MSVC_CLANG AND # Available with MSVC 15.7+ but not for CLANG.
if(MSVC_VERSION GREATER_EQUAL 1914 AND # Available with 15.7+
NOT MSVC_CLANG AND # But not for clang
NOT WITH_WINDOWS_SCCACHE AND # And not when sccache is enabled
NOT VS_CLANG_TIDY) # Clang-tidy does not like these options
add_compile_options(/experimental:external /external:templates- /external:I "${LIBDIR}" /external:W0)
@@ -477,16 +504,12 @@ if(WITH_JACK)
endif()
if(WITH_PYTHON)
# Cache version for make_bpy_wheel.py to detect.
unset(PYTHON_VERSION CACHE)
set(PYTHON_VERSION "3.10" CACHE STRING "Python version")
set(PYTHON_VERSION 3.10) # CACHE STRING)
string(REPLACE "." "" _PYTHON_VERSION_NO_DOTS ${PYTHON_VERSION})
set(PYTHON_LIBRARY ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/libs/python${_PYTHON_VERSION_NO_DOTS}.lib)
set(PYTHON_LIBRARY_DEBUG ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/libs/python${_PYTHON_VERSION_NO_DOTS}_d.lib)
set(PYTHON_EXECUTABLE ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/bin/python$<$<CONFIG:Debug>:_d>.exe)
set(PYTHON_INCLUDE_DIR ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/include)
set(PYTHON_NUMPY_INCLUDE_DIRS ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/lib/site-packages/numpy/core/include)
set(NUMPY_FOUND ON)
@@ -740,7 +763,7 @@ if(WITH_TBB)
endif()
# used in many places so include globally, like OpenGL
include_directories(SYSTEM "${PTHREADS_INCLUDE_DIRS}")
blender_include_dirs_sys("${PTHREADS_INCLUDE_DIRS}")
set(WINTAB_INC ${LIBDIR}/wintab/include)
@@ -847,8 +870,8 @@ endif()
if(WINDOWS_PYTHON_DEBUG)
# Include the system scripts in the blender_python_system_scripts project.
file(GLOB_RECURSE inFiles "${CMAKE_SOURCE_DIR}/release/scripts/*.*" )
add_custom_target(blender_python_system_scripts SOURCES ${inFiles})
FILE(GLOB_RECURSE inFiles "${CMAKE_SOURCE_DIR}/release/scripts/*.*" )
ADD_CUSTOM_TARGET(blender_python_system_scripts SOURCES ${inFiles})
foreach(_source IN ITEMS ${inFiles})
get_filename_component(_source_path "${_source}" PATH)
string(REPLACE "${CMAKE_SOURCE_DIR}/release/scripts/" "" _source_path "${_source_path}")
@@ -868,8 +891,8 @@ if(WINDOWS_PYTHON_DEBUG)
endif()
file(TO_CMAKE_PATH ${USER_SCRIPTS_ROOT} USER_SCRIPTS_ROOT)
file(GLOB_RECURSE inFiles "${USER_SCRIPTS_ROOT}/*.*" )
add_custom_target(blender_python_user_scripts SOURCES ${inFiles})
FILE(GLOB_RECURSE inFiles "${USER_SCRIPTS_ROOT}/*.*" )
ADD_CUSTOM_TARGET(blender_python_user_scripts SOURCES ${inFiles})
foreach(_source IN ITEMS ${inFiles})
get_filename_component(_source_path "${_source}" PATH)
string(REPLACE "${USER_SCRIPTS_ROOT}" "" _source_path "${_source_path}")

View File

@@ -3,22 +3,20 @@
# First generate the manifest for tests since it will not need the dependency on the CRT.
configure_file(${CMAKE_SOURCE_DIR}/release/windows/manifest/blender.exe.manifest.in ${CMAKE_CURRENT_BINARY_DIR}/tests.exe.manifest @ONLY)
# Always detect system libraries, since they are also used by oneAPI.
# But don't always install them, only for WITH_WINDOWS_BUNDLE_CRT=ON.
set(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP TRUE)
set(CMAKE_INSTALL_UCRT_LIBRARIES TRUE)
set(CMAKE_INSTALL_OPENMP_LIBRARIES ${WITH_OPENMP})
# This sometimes can change when updates are installed and the compiler version
# changes, so test if it exists and if not, give InstallRequiredSystemLibraries
# another chance to figure out the path.
if(MSVC_REDIST_DIR AND NOT EXISTS "${MSVC_REDIST_DIR}")
unset(MSVC_REDIST_DIR CACHE)
endif()
include(InstallRequiredSystemLibraries)
if(WITH_WINDOWS_BUNDLE_CRT)
set(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP TRUE)
set(CMAKE_INSTALL_UCRT_LIBRARIES TRUE)
set(CMAKE_INSTALL_OPENMP_LIBRARIES ${WITH_OPENMP})
# This sometimes can change when updates are installed and the compiler version
# changes, so test if it exists and if not, give InstallRequiredSystemLibraries
# another chance to figure out the path.
if(MSVC_REDIST_DIR AND NOT EXISTS "${MSVC_REDIST_DIR}")
unset(MSVC_REDIST_DIR CACHE)
endif()
include(InstallRequiredSystemLibraries)
# ucrtbase(d).dll cannot be in the manifest, due to the way windows 10 handles
# redirects for this dll, for details see T88813.
foreach(lib ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS})

View File

@@ -30,8 +30,6 @@ from typing import (
cast,
)
import shlex
SOURCE_DIR = join(dirname(__file__), "..", "..")
SOURCE_DIR = normpath(SOURCE_DIR)
@@ -162,7 +160,7 @@ def build_info(
for c in compilers:
args = args.replace(c, fake_compiler)
args = shlex.split(args)
args = args.split()
# end
# remove compiler

View File

@@ -1,222 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
"""
Make Python wheel package (`*.whl`) file from Blender built with 'WITH_PYTHON_MODULE' enabled.
Example
=======
If the "bpy" module was build on Linux using the command:
make bpy lite
The command to package it as a wheel is:
./build_files/utils/make_bpy_wheel.py ../build_linux_bpy_lite/bin --output-dir=./
This will create a `*.whl` file in the current directory.
"""
import argparse
import make_utils
import os
import re
import platform
import string
import setuptools # type: ignore
import sys
from typing import (
Generator,
List,
Optional,
Sequence,
Tuple,
)
# ------------------------------------------------------------------------------
# Generic Functions
def find_dominating_file(
path: str,
search: Sequence[str],
) -> str:
while True:
for d in search:
if os.path.exists(os.path.join(path, d)):
return os.path.join(path, d)
path_next = os.path.normpath(os.path.join(path, ".."))
if path == path_next:
break
path = path_next
return ""
# ------------------------------------------------------------------------------
# CMake Cache Access
def cmake_cache_var_iter(filepath_cmake_cache: str) -> Generator[Tuple[str, str, str], None, None]:
import re
re_cache = re.compile(r"([A-Za-z0-9_\-]+)?:?([A-Za-z0-9_\-]+)?=(.*)$")
with open(filepath_cmake_cache, "r", encoding="utf-8") as cache_file:
for l in cache_file:
match = re_cache.match(l.strip())
if match is not None:
var, type_, val = match.groups()
yield (var, type_ or "", val)
def cmake_cache_var(filepath_cmake_cache: str, var: str) -> Optional[str]:
for var_iter, type_iter, value_iter in cmake_cache_var_iter(filepath_cmake_cache):
if var == var_iter:
return value_iter
return None
def cmake_cache_var_or_exit(filepath_cmake_cache: str, var: str) -> str:
value = cmake_cache_var(filepath_cmake_cache, var)
if value is None:
sys.stderr.write("Unable to find %r in %r, abort!\n" % (var, filepath_cmake_cache))
sys.exit(1)
return value
# ------------------------------------------------------------------------------
# Argument Parser
def argparse_create() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"install_dir",
metavar='INSTALL_DIR',
type=str,
help="The installation directory containing the \"bpy\" package.",
)
parser.add_argument(
"--build-dir",
metavar='BUILD_DIR',
default=None,
help="The build directory containing 'CMakeCache.txt' (search parent directories of INSTALL_DIR when omitted).",
required=False,
)
parser.add_argument(
"--output-dir",
metavar='OUTPUT_DIR',
default=None,
help="The destination directory for the '*.whl' file (use INSTALL_DIR when omitted).",
required=False,
)
return parser
# ------------------------------------------------------------------------------
# Main Function
def main() -> None:
# Parse arguments.
args = argparse_create().parse_args()
install_dir = os.path.abspath(args.install_dir)
output_dir = os.path.abspath(args.output_dir) if args.output_dir else install_dir
if args.build_dir:
build_dir = os.path.abspath(args.build_dir)
filepath_cmake_cache = os.path.join(build_dir, "CMakeCache.txt")
del build_dir
if not os.path.exists(filepath_cmake_cache):
sys.stderr.write("File not found %r, abort!\n" % filepath_cmake_cache)
sys.exit(1)
else:
filepath_cmake_cache = find_dominating_file(install_dir, ("CMakeCache.txt",))
if not filepath_cmake_cache:
# Should never fail.
sys.stderr.write("Unable to find CMakeCache.txt in or above %r, abort!\n" % install_dir)
sys.exit(1)
# Get the major and minor Python version.
python_version = cmake_cache_var_or_exit(filepath_cmake_cache, "PYTHON_VERSION")
python_version_number = (
tuple(int("".join(c for c in digit if c in string.digits)) for digit in python_version.split(".")) +
# Support version without a minor version "3" (add zero).
tuple((0, 0, 0))
)
python_version_str = "%d.%d" % python_version_number[:2]
# Get Blender version.
blender_version_str = str(make_utils.parse_blender_version())
# Set platform tag following conventions.
if sys.platform == "darwin":
target = cmake_cache_var_or_exit(filepath_cmake_cache, "CMAKE_OSX_DEPLOYMENT_TARGET").split(".")
machine = cmake_cache_var_or_exit(filepath_cmake_cache, "CMAKE_OSX_ARCHITECTURES")
platform_tag = "macosx_%d_%d_%s" % (int(target[0]), int(target[1]), machine)
elif sys.platform == "win32":
platform_tag = "win_%s" % (platform.machine().lower())
elif sys.platform == "linux":
glibc = os.confstr("CS_GNU_LIBC_VERSION")
if glibc is None:
sys.stderr.write("Unable to find \"CS_GNU_LIBC_VERSION\", abort!\n")
sys.exit(1)
glibc = "%s_%s" % tuple(glibc.split()[1].split(".")[:2])
platform_tag = "manylinux_%s_%s" % (glibc, platform.machine().lower())
else:
sys.stderr.write("Unsupported platform: %s, abort!\n" % (sys.platform))
sys.exit(1)
os.chdir(install_dir)
# Include all files recursively.
def package_files(root_dir: str) -> List[str]:
paths = []
for path, dirs, files in os.walk(root_dir):
paths += [os.path.join("..", path, f) for f in files]
return paths
# Ensure this wheel is marked platform specific.
class BinaryDistribution(setuptools.dist.Distribution): # type: ignore
def has_ext_modules(self) -> bool:
return True
# Build wheel.
sys.argv = [sys.argv[0], "bdist_wheel"]
setuptools.setup(
name="bpy",
version=blender_version_str,
install_requires=["cython", "numpy", "requests", "zstandard"],
python_requires="==%d.%d.*" % (python_version_number[0], python_version_number[1]),
packages=["bpy"],
package_data={"": package_files("bpy")},
distclass=BinaryDistribution,
options={"bdist_wheel": {"plat_name": platform_tag}},
description="Blender as a Python module",
license="GPL-3.0",
author="Blender Foundation",
author_email="bf-committers@blender.org",
url="https://www.blender.org"
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Move wheel to output directory.
dist_dir = os.path.join(install_dir, "dist")
for f in os.listdir(dist_dir):
if f.endswith(".whl"):
# No apparent way to override this ABI version with setuptools, so rename.
sys_py = "cp%d%d" % (sys.version_info.major, sys.version_info.minor)
sys_py_abi = sys_py + sys.abiflags
blender_py = "cp%d%d" % (python_version_number[0], python_version_number[1])
renamed_f = f.replace(sys_py_abi, blender_py).replace(sys_py, blender_py)
os.rename(os.path.join(dist_dir, f), os.path.join(output_dir, renamed_f))
if __name__ == "__main__":
main()

View File

@@ -2,7 +2,7 @@
# SPDX-License-Identifier: GPL-2.0-or-later
import argparse
import make_utils
import dataclasses
import os
import re
import subprocess
@@ -19,8 +19,6 @@ from typing import Iterable, TextIO, Optional, Any, Union
SKIP_NAMES = {
".gitignore",
".gitmodules",
".gitattributes",
".git-blame-ignore-revs",
".arcconfig",
".svn",
}
@@ -52,7 +50,7 @@ def main() -> None:
print(f"Output dir: {curdir}")
version = make_utils.parse_blender_version()
version = parse_blender_version(blender_srcdir)
tarball = tarball_path(curdir, version, cli_args)
manifest = manifest_path(tarball)
packages_dir = packages_path(curdir, cli_args)
@@ -64,7 +62,53 @@ def main() -> None:
print("Done!")
def tarball_path(output_dir: Path, version: make_utils.BlenderVersion, cli_args: Any) -> Path:
@dataclasses.dataclass
class BlenderVersion:
version: int # 293 for 2.93.1
patch: int # 1 for 2.93.1
cycle: str # 'alpha', 'beta', 'release', maybe others.
@property
def is_release(self) -> bool:
return self.cycle == "release"
def __str__(self) -> str:
"""Convert to version string.
>>> str(BlenderVersion(293, 1, "alpha"))
'2.93.1-alpha'
>>> str(BlenderVersion(327, 0, "release"))
'3.27.0'
"""
version_major = self.version // 100
version_minor = self.version % 100
as_string = f"{version_major}.{version_minor}.{self.patch}"
if self.is_release:
return as_string
return f"{as_string}-{self.cycle}"
def parse_blender_version(blender_srcdir: Path) -> BlenderVersion:
version_path = blender_srcdir / "source/blender/blenkernel/BKE_blender_version.h"
version_info = {}
line_re = re.compile(r"^#define (BLENDER_VERSION[A-Z_]*)\s+([0-9a-z]+)$")
with version_path.open(encoding="utf-8") as version_file:
for line in version_file:
match = line_re.match(line.strip())
if not match:
continue
version_info[match.group(1)] = match.group(2)
return BlenderVersion(
int(version_info["BLENDER_VERSION"]),
int(version_info["BLENDER_VERSION_PATCH"]),
version_info["BLENDER_VERSION_CYCLE"],
)
def tarball_path(output_dir: Path, version: BlenderVersion, cli_args: Any) -> Path:
extra = ""
if cli_args.include_packages:
extra = "-with-libraries"
@@ -104,7 +148,7 @@ def packages_path(current_directory: Path, cli_args: Any) -> Optional[Path]:
def create_manifest(
version: make_utils.BlenderVersion,
version: BlenderVersion,
outpath: Path,
blender_srcdir: Path,
packages_dir: Optional[Path],
@@ -126,9 +170,9 @@ def main_files_to_manifest(blender_srcdir: Path, outfile: TextIO) -> None:
def submodules_to_manifest(
blender_srcdir: Path, version: make_utils.BlenderVersion, outfile: TextIO
blender_srcdir: Path, version: BlenderVersion, outfile: TextIO
) -> None:
skip_addon_contrib = version.is_release()
skip_addon_contrib = version.is_release
assert not blender_srcdir.is_absolute()
for line in git_command("-C", blender_srcdir, "submodule"):
@@ -156,11 +200,7 @@ def packages_to_manifest(outfile: TextIO, packages_dir: Path) -> None:
def create_tarball(
version: make_utils.BlenderVersion,
tarball: Path,
manifest: Path,
blender_srcdir: Path,
packages_dir: Optional[Path],
version: BlenderVersion, tarball: Path, manifest: Path, blender_srcdir: Path, packages_dir: Optional[Path]
) -> None:
print(f'Creating archive: "{tarball}" ...', end="", flush=True)
command = ["tar"]

View File

@@ -9,15 +9,9 @@ import re
import shutil
import subprocess
import sys
from pathlib import Path
from typing import (
Sequence,
Optional,
)
def call(cmd: Sequence[str], exit_on_error: bool = True, silent: bool = False) -> int:
def call(cmd, exit_on_error=True, silent=False):
if not silent:
print(" ".join(cmd))
@@ -35,7 +29,7 @@ def call(cmd: Sequence[str], exit_on_error: bool = True, silent: bool = False) -
return retcode
def check_output(cmd: Sequence[str], exit_on_error: bool = True) -> str:
def check_output(cmd, exit_on_error=True):
# Flush to ensure correct order output on Windows.
sys.stdout.flush()
sys.stderr.flush()
@@ -52,14 +46,14 @@ def check_output(cmd: Sequence[str], exit_on_error: bool = True) -> str:
return output.strip()
def git_branch_exists(git_command: str, branch: str) -> bool:
def git_branch_exists(git_command, branch):
return (
call([git_command, "rev-parse", "--verify", branch], exit_on_error=False, silent=True) == 0 or
call([git_command, "rev-parse", "--verify", "remotes/origin/" + branch], exit_on_error=False, silent=True) == 0
)
def git_branch(git_command: str) -> str:
def git_branch(git_command):
# Get current branch name.
try:
branch = subprocess.check_output([git_command, "rev-parse", "--abbrev-ref", "HEAD"])
@@ -70,7 +64,7 @@ def git_branch(git_command: str) -> str:
return branch.strip().decode('utf8')
def git_tag(git_command: str) -> Optional[str]:
def git_tag(git_command):
# Get current tag name.
try:
tag = subprocess.check_output([git_command, "describe", "--exact-match"], stderr=subprocess.STDOUT)
@@ -80,19 +74,18 @@ def git_tag(git_command: str) -> Optional[str]:
return tag.strip().decode('utf8')
def git_branch_release_version(branch: str, tag: str) -> Optional[str]:
re_match = re.search("^blender-v(.*)-release$", branch)
release_version = None
if re_match:
release_version = re_match.group(1)
def git_branch_release_version(branch, tag):
release_version = re.search("^blender-v(.*)-release$", branch)
if release_version:
release_version = release_version.group(1)
elif tag:
re_match = re.search(r"^v([0-9]*\.[0-9]*).*", tag)
if re_match:
release_version = re_match.group(1)
release_version = re.search(r"^v([0-9]*\.[0-9]*).*", tag)
if release_version:
release_version = release_version.group(1)
return release_version
def svn_libraries_base_url(release_version: Optional[str], branch: Optional[str] = None) -> str:
def svn_libraries_base_url(release_version, branch=None):
if release_version:
svn_branch = "tags/blender-" + release_version + "-release"
elif branch:
@@ -102,58 +95,9 @@ def svn_libraries_base_url(release_version: Optional[str], branch: Optional[str]
return "https://svn.blender.org/svnroot/bf-blender/" + svn_branch + "/lib/"
def command_missing(command: str) -> bool:
def command_missing(command):
# Support running with Python 2 for macOS
if sys.version_info >= (3, 0):
return shutil.which(command) is None
else:
return False
class BlenderVersion:
def __init__(self, version: int, patch: int, cycle: str):
# 293 for 2.93.1
self.version = version
# 1 for 2.93.1
self.patch = patch
# 'alpha', 'beta', 'release', maybe others.
self.cycle = cycle
def is_release(self) -> bool:
return self.cycle == "release"
def __str__(self) -> str:
"""Convert to version string.
>>> str(BlenderVersion(293, 1, "alpha"))
'2.93.1-alpha'
>>> str(BlenderVersion(327, 0, "release"))
'3.27.0'
"""
version_major = self.version // 100
version_minor = self.version % 100
as_string = f"{version_major}.{version_minor}.{self.patch}"
if self.is_release():
return as_string
return f"{as_string}-{self.cycle}"
def parse_blender_version() -> BlenderVersion:
blender_srcdir = Path(__file__).absolute().parent.parent.parent
version_path = blender_srcdir / "source/blender/blenkernel/BKE_blender_version.h"
version_info = {}
line_re = re.compile(r"^#define (BLENDER_VERSION[A-Z_]*)\s+([0-9a-z]+)$")
with version_path.open(encoding="utf-8") as version_file:
for line in version_file:
match = line_re.match(line.strip())
if not match:
continue
version_info[match.group(1)] = match.group(2)
return BlenderVersion(
int(version_info["BLENDER_VERSION"]),
int(version_info["BLENDER_VERSION_PATCH"]),
version_info["BLENDER_VERSION_CYCLE"],
)

View File

@@ -139,7 +139,7 @@ https://www.blender.org''')
l = lines.pop(0)
if l:
assert l.startswith('\t')
assert(l.startswith('\t'))
l = l[1:] # Remove first white-space (tab).
fh.write('%s\n' % man_format(l))

View File

@@ -134,6 +134,7 @@ batch = batch_for_shader(shader, 'LINES', {"pos": coords})
def draw():
shader.bind()
shader.uniform_float("color", (1, 1, 0, 1))
batch.draw(shader)

View File

@@ -58,6 +58,7 @@ batch = batch_for_shader(
def draw():
shader.bind()
matrix = bpy.context.region_data.perspective_matrix
shader.uniform_float("u_ViewProjectionMatrix", matrix)
shader.uniform_float("u_Scale", 10)

View File

@@ -41,6 +41,7 @@ batch = batch_for_shader(shader, 'TRIS', {"position": coords})
def draw():
shader.bind()
matrix = bpy.context.region_data.perspective_matrix
shader.uniform_float("viewProjectionMatrix", matrix)
shader.uniform_float("brightness", 0.5)

View File

@@ -22,6 +22,7 @@ batch = batch_for_shader(shader, 'LINES', {"pos": coords}, indices=indices)
def draw():
shader.bind()
shader.uniform_float("color", (1, 0, 0, 1))
batch.draw(shader)

View File

@@ -18,6 +18,7 @@ batch = batch_for_shader(shader, 'TRIS', {"pos": vertices}, indices=indices)
def draw():
shader.bind()
shader.uniform_float("color", (0, 0.5, 0.5, 1.0))
batch.draw(shader)

View File

@@ -56,6 +56,7 @@ batch = batch_for_shader(
def draw():
shader.bind()
shader.uniform_sampler("image", texture)
batch.draw(shader)

View File

@@ -76,6 +76,7 @@ batch = batch_for_shader(
def draw():
shader.bind()
shader.uniform_float("modelMatrix", Matrix.Translation((1, 2, 3)) @ Matrix.Scale(3, 4))
shader.uniform_float("viewProjectionMatrix", bpy.context.region_data.perspective_matrix)
shader.uniform_sampler("image", offscreen.texture_color)

View File

@@ -1,11 +1,11 @@
sphinx==5.1.1
sphinx==5.0.1
# Sphinx dependencies that are important
Jinja2==3.1.2
Pygments==2.13.0
Pygments==2.12.0
docutils==0.17.1
snowballstemmer==2.2.0
babel==2.10.3
babel==2.10.1
requests==2.27.1
# Only needed to match the theme used for the official documentation.

View File

@@ -1,9 +1,7 @@
:tocdepth: 2
Change Log
**********
Changes in Blender's Python API between releases.
Blender API Change Log
**********************
.. note, this document is auto generated by sphinx_changelog_gen.py

View File

@@ -1,15 +0,0 @@
.. _info_advanced-index:
********
Advanced
********
This chapter covers advanced use (topics which may not be required for typical usage).
.. NOTE(@campbellbarton): Blender-as-a-Python-module is too obscure a topic to list directly on the main-page,
so opt for an "Advanced" page which can be expanded on as needed.
.. toctree::
:maxdepth: 1
info_advanced_blender_as_bpy.rst

View File

@@ -1,126 +0,0 @@
**************************
Blender as a Python Module
**************************
Blender supports being built as a Python module,
allowing ``import bpy`` to be added to any Python script, providing access to Blender's features.
.. note::
At the time of writing, official builds are not available;
using this requires compiling Blender yourself, see the
`build instructions <https://wiki.blender.org/w/index.php?title=Building_Blender/Other/BlenderAsPyModule>`__.
Use Cases
=========
Python developers may wish to integrate Blender into scripts which don't center around Blender.
Possible uses include:
- Visualizing data by rendering images and animations.
- Image processing using Blender's compositor.
- Video editing (using Blender's sequencer).
- 3D file conversion.
- Development, accessing ``bpy`` from Python IDEs and debugging tools for example.
- Automation.
Usage
=====
For the most part using Blender as a Python module is equivalent to running a script in background-mode
(passing the command-line arguments ``--background`` or ``-b``);
however, there are some differences to be aware of.
.. Sorted alphabetically as there isn't an especially logical order to show them.
Blender's Executable Access
The attribute :class:`bpy.app.binary_path` defaults to an empty string.
If you wish to point this to the location of a known executable you may set the value.
This example searches for the binary, setting it when found:
.. code-block:: python
import bpy
import shutil
blender_bin = shutil.which("blender")
if blender_bin:
print("Found:", blender_bin)
bpy.app.binary_path = blender_bin
else:
print("Unable to find blender!")
Blender's Internal Modules
There are many modules included with Blender such as :mod:`gpu` and :mod:`mathutils`.
It's important that these are imported after ``bpy`` or they will not be found.
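As a minimal sketch of the required import order (assuming a ``bpy`` build is on ``sys.path``):
.. code-block:: python

   # Import ``bpy`` first so Blender's bundled modules can be found.
   import bpy
   import mathutils

   # The bundled modules are now usable like any other Python module.
   print(mathutils.Vector((1.0, 2.0, 3.0)).length)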
Command Line Arguments Unsupported
Functionality controlled by command line arguments (shown by calling ``blender --help``) isn't accessible.
Typically this isn't such a limitation although there are some command line arguments that don't have
equivalents in Blender's Python API (``--threads`` and ``--log`` for example).
.. note::
Access to these settings may be added in the future as needed.
Resource Sharing (GPU)
It's possible other Python modules make use of the GPU in a way that prevents Blender/Cycles from accessing the GPU.
Signal Handlers
Blender's typical signal handlers are not initialized, so there is no special handling for ``Control-C``
to cancel a render and a crash log is not written in the event of a crash.
Startup and Preferences
When the ``bpy`` module loads it contains the default startup scene
(instead of an "empty" blend-file as you might expect), so there is a default cube, camera and light.
If you wish to start from an empty file use: ``bpy.ops.wm.read_factory_settings(use_empty=True)``.
The user's startup file and preferences are ignored to prevent your local configuration from impacting a script's behavior.
The Python module behaves as if ``--factory-startup`` was passed as a command line argument.
The user's preferences and startup file can be loaded using operators:
.. code-block:: python
import bpy
bpy.ops.wm.read_userpref()
bpy.ops.wm.read_homefile()
Limitations
===========
Most constraints of Blender as an application still apply:
Reloading Unsupported
Reloading the ``bpy`` module via ``importlib.reload`` will raise an exception
instead of reloading and resetting the module.
Instead, the operator ``bpy.ops.wm.read_factory_settings()`` can be used to reset the internal state.
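A short sketch of resetting the module's state instead of reloading it:
.. code-block:: python

   import bpy

   # ``importlib.reload(bpy)`` raises an exception; reset the internal state instead.
   bpy.ops.wm.read_factory_settings()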
Single Blend File Restriction
Only a single ``.blend`` file can be edited at a time.
.. hint::
As with the application it's possible to start multiple instances,
each with its own ``bpy`` and therefore its own Blender state.
Python provides the ``multiprocessing`` module to make communicating with sub-processes more convenient.
In some cases the library API may be an alternative to starting separate processes,
although this API operates on reading and writing ID data-blocks and isn't
a complete substitute for loading ``.blend`` files, see:
- :meth:`bpy.types.BlendDataLibraries.load`
- :meth:`bpy.types.BlendDataLibraries.write`
- :meth:`bpy.types.BlendData.temp_data`
supports a temporary data-context to avoid manipulating the current ``.blend`` file.
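As a hedged sketch of the first of these (the file path below is hypothetical), appending objects from another ``.blend`` file through the library API:
.. code-block:: python

   import bpy

   # Hypothetical path, replace with an existing .blend file.
   filepath = "/path/to/other.blend"

   # Append (link=False) every object data-block from the other file.
   with bpy.data.libraries.load(filepath, link=False) as (data_from, data_to):
       data_to.objects = data_from.objects

   # Link the appended objects into the current scene.
   for ob in data_to.objects:
       if ob is not None:
           bpy.context.scene.collection.objects.link(ob)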

View File

@@ -1,6 +1,6 @@
*******************
API Reference Usage
Reference API Usage
*******************
Blender has many interlinking data types which have an auto-generated reference API which often has the information

View File

@@ -1,8 +1,8 @@
.. _info_overview:
************
API Overview
************
*******************
Python API Overview
*******************
The purpose of this document is to explain how Python and Blender fit together,
covering some of the functionality that may not be obvious from reading the API references

View File

@@ -241,9 +241,9 @@ def main():
comment_washed = []
comment = [] if comment is None else comment
for i, l in enumerate(comment):
assert ((l.strip() == "") or
(l in {"/*", " *"}) or
(l.startswith(("/* ", " * "))))
assert((l.strip() == "") or
(l in {"/*", " *"}) or
(l.startswith(("/* ", " * "))))
l = l[3:]
if i == 0 and not l.strip():
@@ -270,7 +270,7 @@ def main():
tp_sub = None
else:
print(arg)
assert 0
assert(0)
tp_str = ""
@@ -315,7 +315,7 @@ def main():
tp_str += " or any sequence of 3 floats"
elif tp == BMO_OP_SLOT_PTR:
tp_str = "dict"
assert tp_sub is not None
assert(tp_sub is not None)
if tp_sub == BMO_OP_SLOT_SUBTYPE_PTR_BMESH:
tp_str = ":class:`bmesh.types.BMesh`"
elif tp_sub == BMO_OP_SLOT_SUBTYPE_PTR_SCENE:
@@ -330,10 +330,10 @@ def main():
tp_str = ":class:`bpy.types.bpy_struct`"
else:
print("Can't find", vars_dict_reverse[tp_sub])
assert 0
assert(0)
elif tp == BMO_OP_SLOT_ELEMENT_BUF:
assert tp_sub is not None
assert(tp_sub is not None)
ls = []
if tp_sub & BM_VERT:
@@ -342,7 +342,7 @@ def main():
ls.append(":class:`bmesh.types.BMEdge`")
if tp_sub & BM_FACE:
ls.append(":class:`bmesh.types.BMFace`")
assert ls # Must be at least one.
assert(ls) # must be at least one
if tp_sub & BMO_OP_SLOT_SUBTYPE_ELEM_IS_SINGLE:
tp_str = "/".join(ls)
@@ -367,10 +367,10 @@ def main():
tp_str += "unknown internal data, not compatible with python"
else:
print("Can't find", vars_dict_reverse[tp_sub])
assert 0
assert(0)
else:
print("Can't find", vars_dict_reverse[tp])
assert 0
assert(0)
args_wash.append((name, tp_str, comment))
return args_wash
@@ -394,7 +394,7 @@ def main():
fw(" :return:\n\n")
for (name, tp, comment) in args_out_wash:
assert name.endswith(".out")
assert(name.endswith(".out"))
name = name[:-4]
fw(" - ``%s``: %s\n\n" % (name, comment))
fw(" **type** %s\n" % tp)

View File

@@ -101,7 +101,7 @@ def api_dump(args):
version, version_key = api_version()
if version is None:
raise ValueError("API dumps can only be generated from within Blender.")
raise(ValueError("API dumps can only be generated from within Blender."))
dump = {}
dump_module = dump["bpy.types"] = {}
@@ -250,7 +250,7 @@ def api_changelog(args):
version, version_key = api_version()
if version is None and (filepath_in_from is None or filepath_in_to is None):
raise ValueError("API dumps files must be given when ran outside of Blender.")
raise(ValueError("API dumps files must be given when ran outside of Blender."))
with open(indexpath, 'r', encoding='utf-8') as file_handle:
index = json.load(file_handle)
@@ -258,21 +258,17 @@ def api_changelog(args):
if filepath_in_to is None:
filepath_in_to = index.get(version_key, None)
if filepath_in_to is None:
raise ValueError("Cannot find API dump file for Blender version " + str(version) + " in index file.")
raise(ValueError("Cannot find API dump file for Blender version " + str(version) + " in index file."))
print("Found to file: %r" % filepath_in_to)
if filepath_in_from is None:
version_from, version_from_key = api_version_previous_in_index(index, version)
if version_from is None:
raise ValueError("No previous version of Blender could be found in the index.")
raise(ValueError("No previous version of Blender could be found in the index."))
filepath_in_from = index.get(version_from_key, None)
if filepath_in_from is None:
raise ValueError(
"Cannot find API dump file for previous Blender version " +
str(version_from) +
" in index file."
)
raise(ValueError("Cannot find API dump file for previous Blender version " + str(version_from) + " in index file."))
print("Found from file: %r" % filepath_in_from)
@@ -281,7 +277,7 @@ def api_changelog(args):
with open(os.path.join(rootpath, filepath_in_to), 'r', encoding='utf-8') as file_handle:
dump_version, dict_to = json.load(file_handle)
assert tuple(dump_version) == version
assert(tuple(dump_version) == version)
api_changes = []
@@ -349,10 +345,8 @@ def api_changelog(args):
fw(""
":tocdepth: 2\n"
"\n"
"Change Log\n"
"**********\n"
"\n"
"Changes in Blender's Python API between releases.\n"
"Blender API Change Log\n"
"**********************\n"
"\n"
".. note, this document is auto generated by sphinx_changelog_gen.py\n"
"\n"

View File

@@ -387,35 +387,23 @@ EXAMPLE_SET_USED = set()
# RST files directory.
RST_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, "rst"))
# Extra info, not api reference docs stored in `./rst/info_*`.
# Pairs of (file, description); the title taken from the RST file is displayed before the description.
# extra info, not api reference docs
# stored in ./rst/info_*
INFO_DOCS = (
("info_quickstart.rst",
"New to Blender or scripting and want to get your feet wet?"),
"Quickstart: New to Blender or scripting and want to get your feet wet?"),
("info_overview.rst",
"A more complete explanation of Python integration."),
"API Overview: A more complete explanation of Python integration"),
("info_api_reference.rst",
"Examples of how to use the API reference docs."),
"API Reference Usage: examples of how to use the API reference docs"),
("info_best_practice.rst",
"Conventions to follow for writing good scripts."),
"Best Practice: Conventions to follow for writing good scripts"),
("info_tips_and_tricks.rst",
"Hints to help you while writing scripts for Blender."),
"Tips and Tricks: Hints to help you while writing scripts for Blender"),
("info_gotcha.rst",
"Some of the problems you may encounter when writing scripts."),
("info_advanced.rst",
"Topics which may not be required for typical usage."),
("change_log.rst",
"List of changes since last Blender release"),
"Gotcha's: Some of the problems you may encounter when writing scripts"),
("change_log.rst", "Change Log: List of changes since last Blender release"),
)
# Referenced indirectly.
INFO_DOCS_OTHER = (
# Included by: `info_advanced.rst`.
"info_advanced_blender_as_bpy.rst",
)
# Hide the actual TOC, use a separate list that links to the items.
# This is done so a short description can be included with each link.
USE_INFO_DOCS_FANCY_INDEX = True
# only support for properties atm.
RNA_BLACKLIST = {
@@ -1482,7 +1470,7 @@ def pyrna2sphinx(basepath):
struct_module_name = struct.module_name
if USE_ONLY_BUILTIN_RNA_TYPES:
assert struct_module_name == "bpy.types"
assert(struct_module_name == "bpy.types")
filepath = os.path.join(basepath, "%s.%s.rst" % (struct_module_name, struct.identifier))
file = open(filepath, "w", encoding="utf-8")
fw = file.write
@@ -1916,7 +1904,7 @@ except ModuleNotFoundError:
# fw(" 'collapse_navigation': True,\n")
fw(" 'sticky_navigation': False,\n")
fw(" 'navigation_depth': 1,\n")
fw(" 'includehidden': False,\n")
# fw(" 'includehidden': True,\n")
# fw(" 'titles_only': False\n")
fw(" }\n\n")
@@ -1988,21 +1976,12 @@ def write_rst_index(basepath):
if not EXCLUDE_INFO_DOCS:
fw(".. toctree::\n")
if USE_INFO_DOCS_FANCY_INDEX:
fw(" :hidden:\n")
fw(" :maxdepth: 1\n")
fw(" :caption: Documentation\n\n")
for info, info_desc in INFO_DOCS:
fw(" %s\n" % info)
fw(" %s <%s>\n" % (info_desc, info))
fw("\n")
if USE_INFO_DOCS_FANCY_INDEX:
# Show a fake TOC, allowing for an extra description to be shown as well as the title.
fw(title_string("Documentation", "="))
for info, info_desc in INFO_DOCS:
fw("- :doc:`%s`: %s\n" % (info.removesuffix(".rst"), info_desc))
fw("\n")
fw(".. toctree::\n")
fw(" :maxdepth: 1\n")
fw(" :caption: Application Modules\n\n")
@@ -2335,8 +2314,6 @@ def copy_handwritten_rsts(basepath):
if not EXCLUDE_INFO_DOCS:
for info, _info_desc in INFO_DOCS:
shutil.copy2(os.path.join(RST_DIR, info), basepath)
for info in INFO_DOCS_OTHER:
shutil.copy2(os.path.join(RST_DIR, info), basepath)
# TODO: put this docs in Blender's code and use import as per modules above.
handwritten_modules = [

View File

@@ -3,7 +3,9 @@
# Too noisy for code we don't maintain.
if(CMAKE_COMPILER_IS_GNUCC)
add_cxx_flag("-Wno-cast-function-type")
if(NOT "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "8.0")
add_cxx_flag("-Wno-cast-function-type")
endif()
endif()
set(INC

View File

@@ -106,7 +106,8 @@
/* Copied from BLI_utils... */
/* C++ can't use _Static_assert, expects static_assert() but c++0x only,
* Coverity also errors out. */
#if (!defined(__cplusplus)) && (!defined(__COVERITY__)) && (defined(__GNUC__)) /* GCC only. */
#if (!defined(__cplusplus)) && (!defined(__COVERITY__)) && \
(defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 406)) /* gcc4.6+ only */
# define ATOMIC_STATIC_ASSERT(a, msg) __extension__ _Static_assert(a, msg);
#else
/* Code adapted from http://www.pixelbeat.org/programming/gcc/static_assert.html */

View File

@@ -6,8 +6,12 @@
#include "testing/testing.h"
#ifdef __GNUC__
# pragma GCC diagnostic error "-Wsign-compare"
# pragma GCC diagnostic error "-Wsign-conversion"
# if (__GNUC__ * 100 + __GNUC_MINOR__) >= 406 /* gcc4.6+ only */
# pragma GCC diagnostic error "-Wsign-compare"
# endif
# if (__GNUC__ * 100 + __GNUC_MINOR__) >= 408
# pragma GCC diagnostic error "-Wsign-conversion"
# endif
#endif
/* -------------------------------------------------------------------- */

View File

@@ -36,7 +36,7 @@ if(WITH_CYCLES_NATIVE_ONLY)
)
if(NOT MSVC)
add_check_cxx_compiler_flag(CMAKE_CXX_FLAGS _has_march_native "-march=native")
ADD_CHECK_CXX_COMPILER_FLAG(CMAKE_CXX_FLAGS _has_march_native "-march=native")
if(_has_march_native)
set(CYCLES_KERNEL_FLAGS "-march=native")
else()
@@ -45,18 +45,18 @@ if(WITH_CYCLES_NATIVE_ONLY)
unset(_has_march_native)
else()
if(NOT MSVC_NATIVE_ARCH_FLAGS)
try_run(
arch_run_result
arch_compile_result
${CMAKE_CURRENT_BINARY_DIR}/
${CMAKE_CURRENT_SOURCE_DIR}/cmake/msvc_arch_flags.c
COMPILE_OUTPUT_VARIABLE arch_compile_output
RUN_OUTPUT_VARIABLE arch_run_output
)
if(arch_compile_result AND "${arch_run_result}" EQUAL "0")
string(STRIP ${arch_run_output} arch_run_output)
set(MSVC_NATIVE_ARCH_FLAGS ${arch_run_output} CACHE STRING "MSVC Native architecture flags")
endif()
TRY_RUN(
arch_run_result
arch_compile_result
${CMAKE_CURRENT_BINARY_DIR}/
${CMAKE_CURRENT_SOURCE_DIR}/cmake/msvc_arch_flags.c
COMPILE_OUTPUT_VARIABLE arch_compile_output
RUN_OUTPUT_VARIABLE arch_run_output
)
if(arch_compile_result AND "${arch_run_result}" EQUAL "0")
string(STRIP ${arch_run_output} arch_run_output)
set(MSVC_NATIVE_ARCH_FLAGS ${arch_run_output} CACHE STRING "MSVC Native architecture flags")
endif()
endif()
set(CYCLES_KERNEL_FLAGS "${MSVC_NATIVE_ARCH_FLAGS}")
endif()
@@ -347,24 +347,6 @@ if(WITH_OPENCOLORIO)
)
endif()
if(WITH_CYCLES_PATH_GUIDING)
add_definitions(-DWITH_PATH_GUIDING)
# The level of the guiding integration.
# Different levels can be selected to measure the overhead of different stages.
# 1 = recording the path segments
# 2 = 1 + generating (not storing) sample data from the segments
# 3 = 2 + storing the generates sample data
# 4 = 3 + training the guiding fields
# 5 = 4 + querying the trained guiding for sampling (full path guiding)
add_definitions(-DPATH_GUIDING_LEVEL=5)
include_directories(
SYSTEM
${OPENPGL_INCLUDE_DIR}
)
endif()
# NaN debugging
if(WITH_CYCLES_DEBUG_NAN)
add_definitions(-DWITH_CYCLES_DEBUG_NAN)
@@ -382,7 +364,7 @@ endif()
# Warnings
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_C_COMPILER_ID MATCHES "Clang")
add_check_cxx_compiler_flag(CMAKE_CXX_FLAGS _has_no_error_unused_macros "-Wno-error=unused-macros")
ADD_CHECK_CXX_COMPILER_FLAG(CMAKE_CXX_FLAGS _has_no_error_unused_macros "-Wno-error=unused-macros")
unset(_has_no_error_unused_macros)
endif()

View File

@@ -7,7 +7,6 @@ set(INC
../../mikktspace
../../../source/blender/makesdna
../../../source/blender/makesrna
../../../source/blender/blenkernel
../../../source/blender/blenlib
../../../source/blender/gpu
../../../source/blender/render

View File

@@ -13,7 +13,7 @@ def _configure_argument_parser():
action='store_true')
parser.add_argument("--cycles-device",
help="Set the device to use for Cycles, overriding user preferences and the scene setting."
"Valid options are 'CPU', 'CUDA', 'OPTIX', 'HIP', 'ONEAPI', or 'METAL'."
"Valid options are 'CPU', 'CUDA', 'OPTIX', 'HIP' or 'METAL'."
"Additionally, you can append '+CPU' to any GPU type for hybrid rendering.",
default=None)
return parser
@@ -156,11 +156,6 @@ def with_osl():
return _cycles.with_osl
def with_path_guiding():
import _cycles
return _cycles.with_path_guiding
def system_info():
import _cycles
return _cycles.system_info()

View File

@@ -179,12 +179,6 @@ enum_view3d_shading_render_pass = (
('SAMPLE_COUNT', "Sample Count", "Per-pixel number of samples"),
)
enum_guiding_distribution = (
('PARALLAX_AWARE_VMM', "Parallax-Aware VMM", "Use Parallax-aware von Mises-Fisher models as directional distribution", 0),
('DIRECTIONAL_QUAD_TREE', "Directional Quad Tree", "Use Directional Quad Trees as directional distribution", 1),
('VMM', "VMM", "Use von Mises-Fisher models as directional distribution", 2),
)
def enum_openimagedenoise_denoiser(self, context):
import _cycles
@@ -364,9 +358,7 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
preview_samples: IntProperty(
name="Viewport Samples",
description="Number of samples to render in the viewport, unlimited if 0",
min=0,
soft_min=1,
max=(1 << 24),
min=0, max=(1 << 24),
default=1024,
)
@@ -515,78 +507,6 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
default=1.0,
)
use_guiding: BoolProperty(
name="Guiding",
description="Use path guiding for sampling paths. Path guiding incrementally "
"learns the light distribution of the scene and guides path into directions "
"with high direct and indirect light contributions",
default=False,
)
use_deterministic_guiding: BoolProperty(
name="Deterministic",
description="Makes path guiding deterministic which means renderings will be"
"reproducible with the same pixel values every time. This feature slows down"
"training",
default=True,
)
guiding_distribution_type: EnumProperty(
name="Guiding Distribution Type",
description="Type of representation for the guiding distribution",
items=enum_guiding_distribution,
default='PARALLAX_AWARE_VMM',
)
use_surface_guiding: BoolProperty(
name="Surface Guiding",
description="Use guiding when sampling directions on a surface",
default=True,
)
surface_guiding_probability: FloatProperty(
name="Surface Guiding Probability",
description="The probability of guiding a direction on a surface",
min=0.0, max=1.0,
default=0.5,
)
use_volume_guiding: BoolProperty(
name="Volume Guiding",
description="Use guiding when sampling directions inside a volume",
default=True,
)
guiding_training_samples: IntProperty(
name="Training Samples",
description="The maximum number of samples used for training path guiding. "
"Higher samples lead to more accurate guiding, however may also unnecessarily slow "
"down rendering once guiding is accurate enough. "
"A value 0 will continue training until the last sample",
min=0,
soft_min=1,
default=128,
)
volume_guiding_probability: FloatProperty(
name="Volume Guiding Probability",
description="The probability of guiding a direction inside a volume",
min=0.0, max=1.0,
default=0.5,
)
use_guiding_direct_light: BoolProperty(
name="Guide Direct Light",
description="Consider the contribution of directly visible light sources during guiding",
default=True,
)
use_guiding_mis_weights: BoolProperty(
name="Use MIS Weights",
description="Use the MIS weight to weight the contribution of directly visible light sources during guiding",
default=True,
)
max_bounces: IntProperty(
name="Max Bounces",
description="Total maximum number of bounces",
@@ -1638,9 +1558,9 @@ class CyclesPreferences(bpy.types.AddonPreferences):
import sys
col.label(text="Requires Intel GPU with Xe-HPG architecture", icon='BLANK1')
if sys.platform.startswith("win"):
col.label(text="and Windows driver version 101.3430 or newer", icon='BLANK1')
col.label(text="and Windows driver version 101.3268 or newer", icon='BLANK1')
elif sys.platform.startswith("linux"):
col.label(text="and Linux driver version xx.xx.23904 or newer", icon='BLANK1')
col.label(text="and Linux driver version xx.xx.23570 or newer", icon='BLANK1')
elif device_type == 'METAL':
col.label(text="Requires Apple Silicon with macOS 12.2 or newer", icon='BLANK1')
col.label(text="or AMD with macOS 12.3 or newer", icon='BLANK1')

View File

@@ -278,63 +278,6 @@ class CYCLES_RENDER_PT_sampling_render_denoise(CyclesButtonsPanel, Panel):
col.prop(cscene, "denoising_prefilter", text="Prefilter")
class CYCLES_RENDER_PT_sampling_path_guiding(CyclesButtonsPanel, Panel):
bl_label = "Path Guiding"
bl_parent_id = "CYCLES_RENDER_PT_sampling"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
from . import engine
return use_cpu(context) and engine.with_path_guiding()
def draw_header(self, context):
scene = context.scene
cscene = scene.cycles
self.layout.prop(cscene, "use_guiding", text="")
def draw(self, context):
scene = context.scene
cscene = scene.cycles
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.active = cscene.use_guiding
col = layout.column(align=True)
col.prop(cscene, "use_surface_guiding")
col.prop(cscene, "use_volume_guiding")
col.prop(cscene, "guiding_training_samples")
class CYCLES_RENDER_PT_sampling_path_guiding_debug(CyclesDebugButtonsPanel, Panel):
bl_label = "Debug"
bl_parent_id = "CYCLES_RENDER_PT_sampling_path_guiding"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
scene = context.scene
cscene = scene.cycles
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.active = cscene.use_guiding
layout.prop(cscene, "guiding_distribution_type", text="Distribution Type")
col = layout.column(align=True)
col.prop(cscene, "surface_guiding_probability")
col.prop(cscene, "volume_guiding_probability")
col = layout.column(align=True)
col.prop(cscene, "use_deterministic_guiding")
col.prop(cscene, "use_guiding_direct_light")
col.prop(cscene, "use_guiding_mis_weights")
class CYCLES_RENDER_PT_sampling_advanced(CyclesButtonsPanel, Panel):
bl_label = "Advanced"
bl_parent_id = "CYCLES_RENDER_PT_sampling"
@@ -2343,8 +2286,6 @@ classes = (
CYCLES_RENDER_PT_sampling_viewport_denoise,
CYCLES_RENDER_PT_sampling_render,
CYCLES_RENDER_PT_sampling_render_denoise,
CYCLES_RENDER_PT_sampling_path_guiding,
CYCLES_RENDER_PT_sampling_path_guiding_debug,
CYCLES_RENDER_PT_sampling_advanced,
CYCLES_RENDER_PT_light_paths,
CYCLES_RENDER_PT_light_paths_max_bounces,

View File

@@ -1084,23 +1084,23 @@ static void create_subd_mesh(Scene *scene,
const int edges_num = b_mesh.edges.length();
if (edges_num != 0 && b_mesh.edge_creases.length() > 0) {
if (edges_num != 0) {
size_t num_creases = 0;
const float *creases = static_cast<float *>(b_mesh.edge_creases[0].ptr.data);
const MEdge *edges = static_cast<MEdge *>(b_mesh.edges[0].ptr.data);
for (int i = 0; i < edges_num; i++) {
if (creases[i] != 0.0f) {
const MEdge &b_edge = edges[i];
if (b_edge.crease != 0) {
num_creases++;
}
}
mesh->reserve_subd_creases(num_creases);
const MEdge *edges = static_cast<MEdge *>(b_mesh.edges[0].ptr.data);
for (int i = 0; i < edges_num; i++) {
if (creases[i] != 0.0f) {
const MEdge &b_edge = edges[i];
mesh->add_edge_crease(b_edge.v1, b_edge.v2, creases[i]);
const MEdge &b_edge = edges[i];
if (b_edge.crease != 0) {
mesh->add_edge_crease(b_edge.v1, b_edge.v2, float(b_edge.crease) / 255.0f);
}
}

View File

@@ -23,8 +23,6 @@
#include "util/log.h"
#include "util/task.h"
#include "BKE_duplilist.h"
CCL_NAMESPACE_BEGIN
/* Utilities */
@@ -355,26 +353,79 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
return object;
}
extern "C" DupliObject *rna_hack_DepsgraphObjectInstance_dupli_object_get(PointerRNA *ptr);
/* This function mirrors drw_uniform_property_lookup in draw_instance_data.cpp */
static bool lookup_property(BL::ID b_id, const string &name, float4 *r_value)
{
PointerRNA ptr;
PropertyRNA *prop;
if (!RNA_path_resolve(&b_id.ptr, name.c_str(), &ptr, &prop)) {
return false;
}
if (prop == NULL) {
return false;
}
PropertyType type = RNA_property_type(prop);
int arraylen = RNA_property_array_length(&ptr, prop);
if (arraylen == 0) {
float value;
if (type == PROP_FLOAT)
value = RNA_property_float_get(&ptr, prop);
else if (type == PROP_INT)
value = static_cast<float>(RNA_property_int_get(&ptr, prop));
else
return false;
*r_value = make_float4(value, value, value, 1.0f);
return true;
}
else if (type == PROP_FLOAT && arraylen <= 4) {
*r_value = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
RNA_property_float_get_array(&ptr, prop, &r_value->x);
return true;
}
return false;
}
/* This function mirrors drw_uniform_attribute_lookup in draw_instance_data.cpp */
static float4 lookup_instance_property(BL::DepsgraphObjectInstance &b_instance,
const string &name,
bool use_instancer)
{
::Object *ob = (::Object *)b_instance.object().ptr.data;
::DupliObject *dupli = nullptr;
::Object *dupli_parent = nullptr;
string idprop_name = string_printf("[\"%s\"]", name.c_str());
float4 value;
/* If requesting instance data, check the parent particle system and object. */
if (use_instancer && b_instance.is_instance()) {
dupli = rna_hack_DepsgraphObjectInstance_dupli_object_get(&b_instance.ptr);
dupli_parent = (::Object *)b_instance.parent().ptr.data;
BL::ParticleSystem b_psys = b_instance.particle_system();
if (b_psys) {
if (lookup_property(b_psys.settings(), idprop_name, &value) ||
lookup_property(b_psys.settings(), name, &value)) {
return value;
}
}
if (lookup_property(b_instance.parent(), idprop_name, &value) ||
lookup_property(b_instance.parent(), name, &value)) {
return value;
}
}
float4 value;
BKE_object_dupli_find_rgba_attribute(ob, dupli, dupli_parent, name.c_str(), &value.x);
/* Check the object and mesh. */
BL::Object b_ob = b_instance.object();
BL::ID b_data = b_ob.data();
return value;
if (lookup_property(b_ob, idprop_name, &value) || lookup_property(b_ob, name, &value) ||
lookup_property(b_data, idprop_name, &value) || lookup_property(b_data, name, &value)) {
return value;
}
return zero_float4();
}
bool BlenderSync::sync_object_attributes(BL::DepsgraphObjectInstance &b_instance, Object *object)

View File

@@ -224,24 +224,27 @@ static void export_pointcloud_motion(PointCloud *pointcloud,
const int num_points = pointcloud->num_points();
float3 *mP = attr_mP->data_float3() + motion_step * num_points;
bool have_motion = false;
int num_motion_points = 0;
const array<float3> &pointcloud_points = pointcloud->get_points();
const int b_points_num = b_pointcloud.points.length();
BL::FloatVectorAttribute b_attr_position = find_position_attribute(b_pointcloud);
std::optional<BL::FloatAttribute> b_attr_radius = find_radius_attribute(b_pointcloud);
for (int i = 0; i < std::min(num_points, b_points_num); i++) {
const float3 co = get_float3(b_attr_position.data[i].vector());
const float radius = b_attr_radius ? b_attr_radius->data[i].value() : 0.0f;
float3 P = co;
P.w = radius;
mP[i] = P;
have_motion = have_motion || (P != pointcloud_points[i]);
for (int i = 0; i < num_points; i++) {
if (num_motion_points < num_points) {
const float3 co = get_float3(b_attr_position.data[i].vector());
const float radius = b_attr_radius ? b_attr_radius->data[i].value() : 0.0f;
float3 P = co;
P.w = radius;
mP[num_motion_points] = P;
have_motion = have_motion || (P != pointcloud_points[num_motion_points]);
num_motion_points++;
}
}
/* In case of new attribute, we verify if there really was any motion. */
if (new_attribute) {
if (b_points_num != num_points || !have_motion) {
if (num_motion_points != num_points || !have_motion) {
pointcloud->attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
}
else if (motion_step > 0) {

View File

@@ -15,7 +15,6 @@
#include "util/debug.h"
#include "util/foreach.h"
#include "util/guiding.h"
#include "util/log.h"
#include "util/md5.h"
#include "util/opengl.h"
@@ -535,7 +534,7 @@ static PyObject *osl_update_node_func(PyObject * /*self*/, PyObject *args)
socket_type = "NodeSocketBool";
data_type = BL::NodeSocket::type_BOOLEAN;
if (param->validdefault) {
default_boolean = bool(param->idefault[0]);
default_boolean = (bool)param->idefault[0];
}
}
else {
@@ -1009,15 +1008,6 @@ void *CCL_python_module_init()
PyModule_AddStringConstant(mod, "osl_version_string", "unknown");
#endif
if (ccl::guiding_supported()) {
PyModule_AddObject(mod, "with_path_guiding", Py_True);
Py_INCREF(Py_True);
}
else {
PyModule_AddObject(mod, "with_path_guiding", Py_False);
Py_INCREF(Py_False);
}
#ifdef WITH_EMBREE
PyModule_AddObject(mod, "with_embree", Py_True);
Py_INCREF(Py_True);

View File

@@ -704,7 +704,7 @@ void BlenderSession::bake(BL::Depsgraph &b_depsgraph_,
buffer_params.window_width = bake_width;
buffer_params.window_height = bake_height;
/* Unique layer name for multi-image baking. */
buffer_params.layer = string_printf("bake_%d\n", bake_id++);
buffer_params.layer = string_printf("bake_%d\n", (int)full_buffer_files_.size());
/* Update session. */
session->reset(session_params, buffer_params);

View File

@@ -146,8 +146,6 @@ class BlenderSession {
BlenderDisplayDriver *display_driver_ = nullptr;
vector<string> full_buffer_files_;
int bake_id = 0;
};
CCL_NAMESPACE_END

View File

@@ -413,22 +413,6 @@ void BlenderSync::sync_integrator(BL::ViewLayer &b_view_layer, bool background)
integrator->set_direct_light_sampling_type(direct_light_sampling_type);
#endif
integrator->set_use_guiding(get_boolean(cscene, "use_guiding"));
integrator->set_use_surface_guiding(get_boolean(cscene, "use_surface_guiding"));
integrator->set_use_volume_guiding(get_boolean(cscene, "use_volume_guiding"));
integrator->set_guiding_training_samples(get_int(cscene, "guiding_training_samples"));
if (use_developer_ui) {
integrator->set_deterministic_guiding(get_boolean(cscene, "use_deterministic_guiding"));
integrator->set_surface_guiding_probability(get_float(cscene, "surface_guiding_probability"));
integrator->set_volume_guiding_probability(get_float(cscene, "volume_guiding_probability"));
integrator->set_use_guiding_direct_light(get_boolean(cscene, "use_guiding_direct_light"));
integrator->set_use_guiding_mis_weights(get_boolean(cscene, "use_guiding_mis_weights"));
GuidingDistributionType guiding_distribution_type = (GuidingDistributionType)get_enum(
cscene, "guiding_distribution_type", GUIDING_NUM_TYPES, GUIDING_TYPE_PARALLAX_AWARE_VMM);
integrator->set_guiding_distribution_type(guiding_distribution_type);
}
DenoiseParams denoise_params = get_denoise_params(b_scene, b_view_layer, background);
/* No denoising support for vertex color baking, vertices packed into image
@@ -753,17 +737,6 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
pass_add(scene, PASS_DENOISING_DEPTH, "Denoising Depth", PassMode::NOISY);
}
#ifdef WITH_CYCLES_DEBUG
b_engine.add_pass("Guiding Color", 3, "RGB", b_view_layer.name().c_str());
pass_add(scene, PASS_GUIDING_COLOR, "Guiding Color", PassMode::NOISY);
b_engine.add_pass("Guiding Probability", 1, "X", b_view_layer.name().c_str());
pass_add(scene, PASS_GUIDING_PROBABILITY, "Guiding Probability", PassMode::NOISY);
b_engine.add_pass("Guiding Average Roughness", 1, "X", b_view_layer.name().c_str());
pass_add(scene, PASS_GUIDING_AVG_ROUGHNESS, "Guiding Average Roughness", PassMode::NOISY);
#endif
/* Custom AOV passes. */
BL::ViewLayer::aovs_iterator b_aov_iter;
for (b_view_layer.aovs.begin(b_aov_iter); b_aov_iter != b_view_layer.aovs.end(); ++b_aov_iter) {

View File

@@ -515,7 +515,7 @@ void BVHSpatialSplit::split_reference(const BVHBuild &builder,
int dim,
float pos)
{
/* Initialize bounding-boxes. */
/* initialize boundboxes */
BoundBox left_bounds = BoundBox::empty;
BoundBox right_bounds = BoundBox::empty;

View File

@@ -69,7 +69,6 @@ if(CYCLES_STANDALONE_REPOSITORY)
_set_default(BOOST_ROOT "${_cycles_lib_dir}/boost")
_set_default(BLOSC_ROOT_DIR "${_cycles_lib_dir}/blosc")
_set_default(EMBREE_ROOT_DIR "${_cycles_lib_dir}/embree")
_set_default(EPOXY_ROOT_DIR "${_cycles_lib_dir}/epoxy")
_set_default(IMATH_ROOT_DIR "${_cycles_lib_dir}/imath")
_set_default(GLEW_ROOT_DIR "${_cycles_lib_dir}/glew")
_set_default(JPEG_ROOT "${_cycles_lib_dir}/jpeg")
@@ -92,11 +91,7 @@ if(CYCLES_STANDALONE_REPOSITORY)
_set_default(USD_ROOT_DIR "${_cycles_lib_dir}/usd")
_set_default(WEBP_ROOT_DIR "${_cycles_lib_dir}/webp")
_set_default(ZLIB_ROOT "${_cycles_lib_dir}/zlib")
if(WIN32)
set(LEVEL_ZERO_ROOT_DIR ${_cycles_lib_dir}/level_zero)
else()
set(LEVEL_ZERO_ROOT_DIR ${_cycles_lib_dir}/level-zero)
endif()
_set_default(LEVEL_ZERO_ROOT_DIR "${_cycles_lib_dir}/level-zero")
_set_default(SYCL_ROOT_DIR "${_cycles_lib_dir}/dpcpp")
# Ignore system libraries
@@ -104,10 +99,6 @@ if(CYCLES_STANDALONE_REPOSITORY)
else()
unset(_cycles_lib_dir)
endif()
else()
if(EXISTS ${LIBDIR})
set(_cycles_lib_dir ${LIBDIR})
endif()
endif()
###########################################################################
@@ -206,17 +197,17 @@ endif()
if(CYCLES_STANDALONE_REPOSITORY)
if(MSVC AND EXISTS ${_cycles_lib_dir})
set(OPENEXR_INCLUDE_DIR ${OPENEXR_ROOT_DIR}/include)
set(OPENEXR_INCLUDE_DIRS ${OPENEXR_INCLUDE_DIR} ${OPENEXR_ROOT_DIR}/include/OpenEXR ${IMATH_ROOT_DIR}/include ${IMATH_ROOT_DIR}/include/Imath)
set(OPENEXR_INCLUDE_DIRS ${OPENEXR_INCLUDE_DIR} ${OPENEXR_ROOT_DIR}/include/OpenEXR)
set(OPENEXR_LIBRARIES
optimized ${OPENEXR_ROOT_DIR}/lib/OpenEXR_s.lib
optimized ${OPENEXR_ROOT_DIR}/lib/OpenEXRCore_s.lib
optimized ${OPENEXR_ROOT_DIR}/lib/Iex_s.lib
optimized ${IMATH_ROOT_DIR}/lib/Imath_s.lib
optimized ${OPENEXR_ROOT_DIR}/lib/Half_s.lib
optimized ${OPENEXR_ROOT_DIR}/lib/IlmImf_s.lib
optimized ${OPENEXR_ROOT_DIR}/lib/Imath_s.lib
optimized ${OPENEXR_ROOT_DIR}/lib/IlmThread_s.lib
debug ${OPENEXR_ROOT_DIR}/lib/OpenEXR_s_d.lib
debug ${OPENEXR_ROOT_DIR}/lib/OpenEXRCore_s_d.lib
debug ${OPENEXR_ROOT_DIR}/lib/Iex_s_d.lib
debug ${IMATH_ROOT_DIR}/lib/Imath_s_d.lib
debug ${OPENEXR_ROOT_DIR}/lib/Half_s_d.lib
debug ${OPENEXR_ROOT_DIR}/lib/IlmImf_s_d.lib
debug ${OPENEXR_ROOT_DIR}/lib/Imath_s_d.lib
debug ${OPENEXR_ROOT_DIR}/lib/IlmThread_s_d.lib
)
else()
@@ -273,31 +264,6 @@ if(CYCLES_STANDALONE_REPOSITORY AND WITH_CYCLES_OSL)
endif()
endif()
###########################################################################
# OpenPGL
###########################################################################
if(WITH_CYCLES_PATH_GUIDING)
if(EXISTS ${_cycles_lib_dir})
set(openpgl_DIR ${_cycles_lib_dir}/openpgl/lib/cmake/openpgl)
endif()
find_package(openpgl QUIET)
if(openpgl_FOUND)
if(WIN32)
get_target_property(OPENPGL_LIBRARIES_RELEASE openpgl::openpgl LOCATION_RELEASE)
get_target_property(OPENPGL_LIBRARIES_DEBUG openpgl::openpgl LOCATION_DEBUG)
set(OPENPGL_LIBRARIES optimized ${OPENPGL_LIBRARIES_RELEASE} debug ${OPENPGL_LIBRARIES_DEBUG})
else()
get_target_property(OPENPGL_LIBRARIES openpgl::openpgl LOCATION)
endif()
get_target_property(OPENPGL_INCLUDE_DIR openpgl::openpgl INTERFACE_INCLUDE_DIRECTORIES)
else()
set(WITH_CYCLES_PATH_GUIDING OFF)
message(STATUS "OpenPGL not found, disabling WITH_CYCLES_PATH_GUIDING")
endif()
endif()
###########################################################################
# OpenColorIO
###########################################################################
@@ -353,8 +319,8 @@ if(CYCLES_STANDALONE_REPOSITORY)
if(NOT BOOST_VERSION)
message(FATAL_ERROR "Unable to determine Boost version")
endif()
set(BOOST_POSTFIX "vc142-mt-x64-${BOOST_VERSION}.lib")
set(BOOST_DEBUG_POSTFIX "vc142-mt-gd-x64-${BOOST_VERSION}.lib")
set(BOOST_POSTFIX "vc141-mt-x64-${BOOST_VERSION}.lib")
set(BOOST_DEBUG_POSTFIX "vc141-mt-gd-x64-${BOOST_VERSION}.lib")
set(BOOST_LIBRARIES
optimized ${BOOST_ROOT}/lib/libboost_date_time-${BOOST_POSTFIX}
optimized ${BOOST_ROOT}/lib/libboost_iostreams-${BOOST_POSTFIX}
@@ -493,7 +459,6 @@ if(CYCLES_STANDALONE_REPOSITORY AND WITH_CYCLES_NANOVDB)
if(MSVC AND EXISTS ${_cycles_lib_dir})
set(NANOVDB_INCLUDE_DIR ${NANOVDB_ROOT_DIR}/include)
set(NANOVDB_INCLUDE_DIRS ${NANOVDB_INCLUDE_DIR})
else()
find_package(NanoVDB REQUIRED)
endif()
@@ -545,7 +510,7 @@ endif()
if(CYCLES_STANDALONE_REPOSITORY)
if((WITH_CYCLES_STANDALONE AND WITH_CYCLES_STANDALONE_GUI) OR
WITH_CYCLES_HYDRA_RENDER_DELEGATE)
WITH_CYCLES_HYDRA_RENDER_DELEGATE)
if(MSVC AND EXISTS ${_cycles_lib_dir})
set(Epoxy_LIBRARIES "${_cycles_lib_dir}/epoxy/lib/epoxy.lib")
set(Epoxy_INCLUDE_DIRS "${_cycles_lib_dir}/epoxy/include")

View File

@@ -118,9 +118,6 @@ macro(cycles_external_libraries_append libraries)
if(WITH_ALEMBIC)
list(APPEND ${libraries} ${ALEMBIC_LIBRARIES})
endif()
if(WITH_PATH_GUIDING)
target_link_libraries(${target} ${OPENPGL_LIBRARIES})
endif()
list(APPEND ${libraries}
${OPENIMAGEIO_LIBRARIES}
@@ -172,13 +169,13 @@ macro(cycles_install_libraries target)
FILES
${TBB_ROOT_DIR}/bin/tbb_debug${CMAKE_SHARED_LIBRARY_SUFFIX}
${OPENVDB_ROOT_DIR}/bin/openvdb_d${CMAKE_SHARED_LIBRARY_SUFFIX}
DESTINATION ${CMAKE_INSTALL_PREFIX})
DESTINATION $<TARGET_FILE_DIR:${target}>)
else()
install(
FILES
${TBB_ROOT_DIR}/bin/tbb${CMAKE_SHARED_LIBRARY_SUFFIX}
${OPENVDB_ROOT_DIR}/bin/openvdb${CMAKE_SHARED_LIBRARY_SUFFIX}
DESTINATION ${CMAKE_INSTALL_PREFIX})
DESTINATION $<TARGET_FILE_DIR:${target}>)
endif()
endif()
endmacro()

View File

@@ -7,7 +7,6 @@
/* Used for `info.denoisers`. */
/* TODO(sergey): The denoisers are probably to be moved completely out of the device into their
* own class. But until then keep API consistent with how it used to work before. */
#include "util/guiding.h"
#include "util/openimagedenoise.h"
CCL_NAMESPACE_BEGIN
@@ -28,12 +27,6 @@ void device_cpu_info(vector<DeviceInfo> &devices)
info.has_osl = true;
info.has_nanovdb = true;
info.has_profiling = true;
if (guiding_supported()) {
info.has_guiding = true;
}
else {
info.has_guiding = false;
}
if (openimagedenoise_supported()) {
info.denoisers |= DENOISER_OPENIMAGEDENOISE;
}

View File

@@ -28,6 +28,7 @@
#include "kernel/device/cpu/kernel.h"
#include "kernel/types.h"
#include "kernel/osl/shader.h"
#include "kernel/osl/globals.h"
// clang-format on
@@ -38,7 +39,6 @@
#include "util/debug.h"
#include "util/foreach.h"
#include "util/function.h"
#include "util/guiding.h"
#include "util/log.h"
#include "util/map.h"
#include "util/openimagedenoise.h"
@@ -279,23 +279,6 @@ void CPUDevice::build_bvh(BVH *bvh, Progress &progress, bool refit)
Device::build_bvh(bvh, progress, refit);
}
void *CPUDevice::get_guiding_device() const
{
#ifdef WITH_PATH_GUIDING
if (!guiding_device) {
if (guiding_device_type() == 8) {
guiding_device = make_unique<openpgl::cpp::Device>(PGL_DEVICE_TYPE_CPU_8);
}
else if (guiding_device_type() == 4) {
guiding_device = make_unique<openpgl::cpp::Device>(PGL_DEVICE_TYPE_CPU_4);
}
}
return guiding_device.get();
#else
return nullptr;
#endif
}
void CPUDevice::get_cpu_kernel_thread_globals(
vector<CPUKernelThreadGlobals> &kernel_thread_globals)
{

View File

@@ -23,12 +23,10 @@
#include "kernel/device/cpu/kernel.h"
#include "kernel/device/cpu/globals.h"
#include "kernel/osl/shader.h"
#include "kernel/osl/globals.h"
// clang-format on
#include "util/guiding.h"
#include "util/unique_ptr.h"
CCL_NAMESPACE_BEGIN
class CPUDevice : public Device {
@@ -45,9 +43,6 @@ class CPUDevice : public Device {
RTCScene embree_scene = NULL;
RTCDevice embree_device;
#endif
#ifdef WITH_PATH_GUIDING
mutable unique_ptr<openpgl::cpp::Device> guiding_device;
#endif
CPUDevice(const DeviceInfo &info_, Stats &stats_, Profiler &profiler_);
~CPUDevice();
@@ -78,8 +73,6 @@ class CPUDevice : public Device {
void build_bvh(BVH *bvh, Progress &progress, bool refit) override;
void *get_guiding_device() const override;
virtual void get_cpu_kernel_thread_globals(
vector<CPUKernelThreadGlobals> &kernel_thread_globals) override;
virtual void *get_cpu_osl_memory() override;

View File

@@ -3,7 +3,10 @@
#include "device/cpu/kernel_thread_globals.h"
// clang-format off
#include "kernel/osl/shader.h"
#include "kernel/osl/globals.h"
// clang-format on
#include "util/profiling.h"
@@ -14,35 +17,25 @@ CPUKernelThreadGlobals::CPUKernelThreadGlobals(const KernelGlobalsCPU &kernel_gl
Profiler &cpu_profiler)
: KernelGlobalsCPU(kernel_globals), cpu_profiler_(cpu_profiler)
{
clear_runtime_pointers();
reset_runtime_memory();
#ifdef WITH_OSL
OSLGlobals::thread_init(this, static_cast<OSLGlobals *>(osl_globals_memory));
OSLShader::thread_init(this, reinterpret_cast<OSLGlobals *>(osl_globals_memory));
#else
(void)osl_globals_memory;
#endif
#ifdef WITH_PATH_GUIDING
opgl_path_segment_storage = new openpgl::cpp::PathSegmentStorage();
#endif
}
CPUKernelThreadGlobals::CPUKernelThreadGlobals(CPUKernelThreadGlobals &&other) noexcept
: KernelGlobalsCPU(std::move(other)), cpu_profiler_(other.cpu_profiler_)
{
other.clear_runtime_pointers();
other.reset_runtime_memory();
}
CPUKernelThreadGlobals::~CPUKernelThreadGlobals()
{
#ifdef WITH_OSL
OSLGlobals::thread_free(this);
#endif
#ifdef WITH_PATH_GUIDING
delete opgl_path_segment_storage;
delete opgl_surface_sampling_distribution;
delete opgl_volume_sampling_distribution;
OSLShader::thread_free(this);
#endif
}
@@ -54,25 +47,16 @@ CPUKernelThreadGlobals &CPUKernelThreadGlobals::operator=(CPUKernelThreadGlobals
*static_cast<KernelGlobalsCPU *>(this) = *static_cast<KernelGlobalsCPU *>(&other);
other.clear_runtime_pointers();
other.reset_runtime_memory();
return *this;
}
void CPUKernelThreadGlobals::clear_runtime_pointers()
void CPUKernelThreadGlobals::reset_runtime_memory()
{
#ifdef WITH_OSL
osl = nullptr;
#endif
#ifdef WITH_PATH_GUIDING
opgl_sample_data_storage = nullptr;
opgl_guiding_field = nullptr;
opgl_path_segment_storage = nullptr;
opgl_surface_sampling_distribution = nullptr;
opgl_volume_sampling_distribution = nullptr;
#endif
}
void CPUKernelThreadGlobals::start_profiling()

View File

@@ -36,7 +36,7 @@ class CPUKernelThreadGlobals : public KernelGlobalsCPU {
void stop_profiling();
protected:
void clear_runtime_pointers();
void reset_runtime_memory();
Profiler &cpu_profiler_;
};

View File

@@ -79,7 +79,7 @@ bool CUDADeviceQueue::enqueue(DeviceKernel kernel,
return false;
}
debug_enqueue_begin(kernel, work_size);
debug_enqueue(kernel, work_size);
const CUDAContextScope scope(cuda_device_);
const CUDADeviceKernel &cuda_kernel = cuda_device_->kernels.get(kernel);
@@ -121,8 +121,6 @@ bool CUDADeviceQueue::enqueue(DeviceKernel kernel,
0),
"enqueue");
debug_enqueue_end();
return !(cuda_device_->have_error());
}

View File

@@ -352,7 +352,6 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo> &subdevices,
info.has_nanovdb = true;
info.has_osl = true;
info.has_guiding = true;
info.has_profiling = true;
info.has_peer_memory = false;
info.use_metalrt = false;
@@ -400,7 +399,6 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo> &subdevices,
/* Accumulate device info. */
info.has_nanovdb &= device.has_nanovdb;
info.has_osl &= device.has_osl;
info.has_guiding &= device.has_guiding;
info.has_profiling &= device.has_profiling;
info.has_peer_memory |= device.has_peer_memory;
info.use_metalrt |= device.use_metalrt;
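
The multi-device accumulation above follows a simple rule: flags that every sub-device must support are combined with `&=`, while flags that any single device can provide are combined with `|=`. Below is a minimal, self-contained sketch of that rule; the `Info` struct and its two fields are illustrative stand-ins, not the real `DeviceInfo`.

```cpp
// Sketch of the capability-aggregation rule: AND for "all devices must support it",
// OR for "any one device is enough". Hypothetical types, not the Cycles API.
#include <iostream>
#include <vector>

struct Info {
  bool has_osl = true;          /* start permissive, then AND down */
  bool has_peer_memory = false; /* start false, then OR up */
};

int main()
{
  std::vector<Info> subdevices = {{true, false}, {false, true}};
  Info multi;
  for (const Info &d : subdevices) {
    multi.has_osl &= d.has_osl;               /* false as soon as one device lacks it */
    multi.has_peer_memory |= d.has_peer_memory; /* true as soon as one device has it */
  }
  std::cout << "has_osl=" << multi.has_osl << " has_peer_memory=" << multi.has_peer_memory << "\n";
  return 0;
}
```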

View File

@@ -66,7 +66,6 @@ class DeviceInfo {
bool display_device; /* GPU is used as a display device. */
bool has_nanovdb; /* Support NanoVDB volumes. */
bool has_osl; /* Support Open Shading Language. */
bool has_guiding; /* Support path guiding. */
bool has_profiling; /* Supports runtime collection of profiling info. */
bool has_peer_memory; /* GPU has P2P access to memory of another GPU. */
bool has_gpu_queue; /* Device supports GPU queue. */
@@ -85,7 +84,6 @@ class DeviceInfo {
display_device = false;
has_nanovdb = false;
has_osl = false;
has_guiding = false;
has_profiling = false;
has_peer_memory = false;
has_gpu_queue = false;
@@ -219,15 +217,6 @@ class Device {
return false;
}
/* Guiding */
/* Returns path guiding device handle. */
virtual void *get_guiding_device() const
{
LOG(ERROR) << "Request guiding field from a device which does not support it.";
return nullptr;
}
/* Buffer denoising. */
/* Returns true if task is fully handled. */

View File

@@ -79,7 +79,7 @@ bool HIPDeviceQueue::enqueue(DeviceKernel kernel,
return false;
}
debug_enqueue_begin(kernel, work_size);
debug_enqueue(kernel, work_size);
const HIPContextScope scope(hip_device_);
const HIPDeviceKernel &hip_kernel = hip_device_->kernels.get(kernel);
@@ -120,8 +120,6 @@ bool HIPDeviceQueue::enqueue(DeviceKernel kernel,
0),
"enqueue");
debug_enqueue_end();
return !(hip_device_->have_error());
}

View File

@@ -308,29 +308,26 @@ MetalKernelPipeline *ShaderCache::get_best_pipeline(DeviceKernel kernel, const M
bool MetalKernelPipeline::should_use_binary_archive() const
{
/* Issues with binary archives in older macOS versions. */
if (@available(macOS 13.0, *)) {
if (auto str = getenv("CYCLES_METAL_DISABLE_BINARY_ARCHIVES")) {
if (atoi(str) != 0) {
/* Don't archive if we have opted out by env var. */
return false;
}
if (auto str = getenv("CYCLES_METAL_DISABLE_BINARY_ARCHIVES")) {
if (atoi(str) != 0) {
/* Don't archive if we have opted out by env var. */
return false;
}
if (pso_type == PSO_GENERIC) {
/* Archive the generic kernels. */
return true;
}
if (device_kernel >= DEVICE_KERNEL_INTEGRATOR_SHADE_BACKGROUND &&
device_kernel <= DEVICE_KERNEL_INTEGRATOR_SHADE_SHADOW) {
/* Archive all shade kernels - they take a long time to compile. */
return true;
}
/* The remaining kernels are all fast to compile. They may get cached by the system shader
* cache, but will be quick to regenerate if not. */
}
if (pso_type == PSO_GENERIC) {
/* Archive the generic kernels. */
return true;
}
if (device_kernel >= DEVICE_KERNEL_INTEGRATOR_SHADE_BACKGROUND &&
device_kernel <= DEVICE_KERNEL_INTEGRATOR_SHADE_SHADOW) {
/* Archive all shade kernels - they take a long time to compile. */
return true;
}
/* The remaining kernels are all fast to compile. They may get cached by the system shader cache,
* but will be quick to regenerate if not. */
return false;
}

View File

@@ -25,12 +25,10 @@ static OneAPIDLLInterface oneapi_dll;
#ifdef _WIN32
# define LOAD_ONEAPI_SHARED_LIBRARY(path) (void *)(LoadLibrary(path))
# define LOAD_ONEAPI_SHARED_LIBRARY_ERROR() GetLastError()
# define FREE_SHARED_LIBRARY(handle) FreeLibrary((HMODULE)handle)
# define GET_SHARED_LIBRARY_SYMBOL(handle, name) GetProcAddress((HMODULE)handle, name)
#elif __linux__
# define LOAD_ONEAPI_SHARED_LIBRARY(path) dlopen(path, RTLD_NOW)
# define LOAD_ONEAPI_SHARED_LIBRARY_ERROR() dlerror()
# define FREE_SHARED_LIBRARY(handle) dlclose(handle)
# define GET_SHARED_LIBRARY_SYMBOL(handle, name) dlsym(handle, name)
#endif
@@ -51,8 +49,8 @@ bool device_oneapi_init()
/* This shouldn't happen, but it still makes sense to have a branch for this. */
if (lib_handle == NULL) {
LOG(ERROR) << "oneAPI kernel shared library cannot be loaded: "
<< LOAD_ONEAPI_SHARED_LIBRARY_ERROR();
LOG(ERROR) << "oneAPI kernel shared library cannot be loaded for some reason. This should not "
"happen, however, it occurs hence oneAPI rendering will be disabled";
return false;
}
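
The platform macros above wrap the usual pair of dynamic-loading APIs: `dlopen`/`dlsym` on Linux and `LoadLibrary`/`GetProcAddress` on Windows. The sketch below shows the same pattern in isolation; the `LOAD_LIB`/`GET_SYM` names and the `libm.so.6` path are placeholders for this example only, not Cycles identifiers.

```cpp
// Minimal sketch of portable dynamic loading behind one set of macro names.
#include <cstdio>

#ifdef _WIN32
#  include <windows.h>
#  define LOAD_LIB(path) (void *)LoadLibraryA(path)
#  define GET_SYM(handle, name) (void *)GetProcAddress((HMODULE)(handle), name)
#else
#  include <dlfcn.h>
#  define LOAD_LIB(path) dlopen(path, RTLD_NOW)
#  define GET_SYM(handle, name) dlsym(handle, name)
#endif

int main()
{
  /* "libm.so.6" is just a placeholder library for this sketch. */
  void *handle = LOAD_LIB("libm.so.6");
  if (!handle) {
    std::printf("library could not be loaded\n");
    return 1;
  }
  void *sym = GET_SYM(handle, "cos");
  std::printf("symbol %s\n", sym ? "found" : "missing");
  return 0;
}
```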

View File

@@ -77,7 +77,7 @@ bool OneapiDeviceQueue::enqueue(DeviceKernel kernel,
void **args = const_cast<void **>(_args.values);
debug_enqueue_begin(kernel, signed_kernel_work_size);
debug_enqueue(kernel, signed_kernel_work_size);
assert(signed_kernel_work_size >= 0);
size_t kernel_work_size = (size_t)signed_kernel_work_size;
@@ -97,8 +97,6 @@ bool OneapiDeviceQueue::enqueue(DeviceKernel kernel,
oneapi_device_->oneapi_error_message() + "\"");
}
debug_enqueue_end();
return is_finished_ok;
}

View File

@@ -46,7 +46,7 @@ bool OptiXDeviceQueue::enqueue(DeviceKernel kernel,
return false;
}
debug_enqueue_begin(kernel, work_size);
debug_enqueue(kernel, work_size);
const CUDAContextScope scope(cuda_device_);
@@ -131,8 +131,6 @@ bool OptiXDeviceQueue::enqueue(DeviceKernel kernel,
1,
1));
debug_enqueue_end();
return !(optix_device->have_error());
}

View File

@@ -12,13 +12,9 @@
CCL_NAMESPACE_BEGIN
DeviceQueue::DeviceQueue(Device *device)
: device(device),
last_kernels_enqueued_(0),
last_sync_time_(0.0),
is_per_kernel_performance_(false)
: device(device), last_kernels_enqueued_(0), last_sync_time_(0.0)
{
DCHECK_NE(device, nullptr);
is_per_kernel_performance_ = getenv("CYCLES_DEBUG_PER_KERNEL_PERFORMANCE");
}
DeviceQueue::~DeviceQueue()
@@ -37,17 +33,11 @@ DeviceQueue::~DeviceQueue()
});
VLOG_DEVICE_STATS << "GPU queue stats:";
double total_time = 0.0;
for (const auto &[mask, time] : stats_sorted) {
total_time += time;
VLOG_DEVICE_STATS << " " << std::setfill(' ') << std::setw(10) << std::fixed
<< std::setprecision(5) << std::right << time
<< "s: " << device_kernel_mask_as_string(mask);
}
if (is_per_kernel_performance_)
VLOG_DEVICE_STATS << "GPU queue total time: " << std::fixed << std::setprecision(5)
<< total_time;
}
}
@@ -60,7 +50,7 @@ void DeviceQueue::debug_init_execution()
last_kernels_enqueued_ = 0;
}
void DeviceQueue::debug_enqueue_begin(DeviceKernel kernel, const int work_size)
void DeviceQueue::debug_enqueue(DeviceKernel kernel, const int work_size)
{
if (VLOG_DEVICE_STATS_IS_ON) {
VLOG_DEVICE_STATS << "GPU queue launch " << device_kernel_as_string(kernel) << ", work_size "
@@ -70,13 +60,6 @@ void DeviceQueue::debug_enqueue_begin(DeviceKernel kernel, const int work_size)
last_kernels_enqueued_ |= (uint64_t(1) << (uint64_t)kernel);
}
void DeviceQueue::debug_enqueue_end()
{
if (VLOG_DEVICE_STATS_IS_ON && is_per_kernel_performance_) {
synchronize();
}
}
void DeviceQueue::debug_synchronize()
{
if (VLOG_DEVICE_STATS_IS_ON) {
@@ -84,11 +67,7 @@ void DeviceQueue::debug_synchronize()
const double elapsed_time = new_time - last_sync_time_;
VLOG_DEVICE_STATS << "GPU queue synchronize, elapsed " << std::setw(10) << elapsed_time << "s";
/* It makes no sense to have entries in the performance data
* container without related kernel information. */
if (last_kernels_enqueued_ != 0) {
stats_kernel_time_[last_kernels_enqueued_] += elapsed_time;
}
stats_kernel_time_[last_kernels_enqueued_] += elapsed_time;
last_sync_time_ = new_time;
}
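
The removed `is_per_kernel_performance_` flag is driven by the `CYCLES_DEBUG_PER_KERNEL_PERFORMANCE` environment variable and forces a synchronization after each enqueue so the elapsed time can be attributed to a single kernel rather than a batch. A rough, self-contained sketch of that idea follows; the kernel names, the timing map, and the sleep standing in for `synchronize()` are invented for illustration.

```cpp
// Sketch: an env var toggles per-kernel timing, which requires syncing after every
// enqueue so each measured interval covers exactly one kernel.
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <map>
#include <string>
#include <thread>

int main()
{
  const bool per_kernel = std::getenv("CYCLES_DEBUG_PER_KERNEL_PERFORMANCE") != nullptr;
  std::map<std::string, double> kernel_time;

  auto last_sync = std::chrono::steady_clock::now();
  for (const std::string kernel : {"init_from_camera", "intersect_closest", "shade_surface"}) {
    /* ... enqueue the kernel here ... */
    if (per_kernel) {
      /* Force a sync so the timing below covers exactly one kernel. */
      std::this_thread::sleep_for(std::chrono::milliseconds(1)); /* stand-in for synchronize() */
      const auto now = std::chrono::steady_clock::now();
      kernel_time[kernel] += std::chrono::duration<double>(now - last_sync).count();
      last_sync = now;
    }
  }
  for (const auto &entry : kernel_time) {
    std::cout << entry.first << ": " << entry.second << "s\n";
  }
  return 0;
}
```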

View File

@@ -162,8 +162,7 @@ class DeviceQueue {
/* Implementations call these from the corresponding methods to generate debugging logs. */
void debug_init_execution();
void debug_enqueue_begin(DeviceKernel kernel, const int work_size);
void debug_enqueue_end();
void debug_enqueue(DeviceKernel kernel, const int work_size);
void debug_synchronize();
string debug_active_kernels();
@@ -173,9 +172,6 @@ class DeviceQueue {
double last_sync_time_;
/* Accumulated execution time for combinations of kernels launched together. */
map<DeviceKernelMask, double> stats_kernel_time_;
/* If true, the performance statistics in the debugging logs focus on individual kernels,
* and an explicit queue synchronization is added after each kernel execution. */
bool is_per_kernel_performance_;
};
CCL_NAMESPACE_END

View File

@@ -65,12 +65,6 @@ if(WITH_OPENIMAGEDENOISE)
)
endif()
if(WITH_CYCLES_PATH_GUIDING)
list(APPEND LIB
${OPENPGL_LIBRARIES}
)
endif()
include_directories(${INC})
include_directories(SYSTEM ${INC_SYS})

View File

@@ -1,32 +0,0 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright 2011-2022 Blender Foundation */
#pragma once
#include "kernel/types.h"
CCL_NAMESPACE_BEGIN
struct GuidingParams {
/* The subset of path guiding parameters that can trigger a creation/rebuild
* of the guiding field. */
bool use = false;
bool use_surface_guiding = false;
bool use_volume_guiding = false;
GuidingDistributionType type = GUIDING_TYPE_PARALLAX_AWARE_VMM;
int training_samples = 128;
bool deterministic = false;
GuidingParams() = default;
bool modified(const GuidingParams &other) const
{
return !((use == other.use) && (use_surface_guiding == other.use_surface_guiding) &&
(use_volume_guiding == other.use_volume_guiding) && (type == other.type) &&
(training_samples == other.training_samples) &&
(deterministic == other.deterministic));
}
};
CCL_NAMESPACE_END
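
The `modified()` check in the header above is what lets the path tracer decide whether the guiding field has to be recreated or can be kept as-is. A simplified sketch of that usage is shown below; `GuidingParamsSketch` is a stand-in that omits the `type` enum and is not the actual Cycles struct.

```cpp
// Sketch: compare incoming parameters against the current ones and only rebuild the
// guiding field when something actually changed.
#include <iostream>

struct GuidingParamsSketch {
  bool use = false;
  bool use_surface_guiding = false;
  bool use_volume_guiding = false;
  int training_samples = 128;
  bool deterministic = false;

  bool modified(const GuidingParamsSketch &other) const
  {
    return !(use == other.use && use_surface_guiding == other.use_surface_guiding &&
             use_volume_guiding == other.use_volume_guiding &&
             training_samples == other.training_samples && deterministic == other.deterministic);
  }
};

int main()
{
  GuidingParamsSketch current, incoming;
  incoming.use = true; /* e.g. the user enabled guiding */

  if (current.modified(incoming)) {
    std::cout << "rebuild guiding field\n";
    current = incoming;
  }
  return 0;
}
```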

View File

@@ -185,25 +185,11 @@ void PathTrace::render_pipeline(RenderWork render_work)
rebalance(render_work);
/* Prepare all per-thread guiding structures before we start with the next rendering
* iteration/progression. */
const bool use_guiding = device_scene_->data.integrator.use_guiding;
if (use_guiding) {
guiding_prepare_structures();
}
path_trace(render_work);
if (render_cancel_.is_requested) {
return;
}
/* Update the guiding field using the training data/samples collected during the rendering
* iteration/progression. */
const bool train_guiding = device_scene_->data.integrator.train_guiding;
if (use_guiding && train_guiding) {
guiding_update_structures();
}
adaptive_sample(render_work);
if (render_cancel_.is_requested) {
return;
@@ -1255,122 +1241,4 @@ string PathTrace::full_report() const
return result;
}
void PathTrace::set_guiding_params(const GuidingParams &guiding_params, const bool reset)
{
#ifdef WITH_PATH_GUIDING
if (guiding_params_.modified(guiding_params)) {
guiding_params_ = guiding_params;
if (guiding_params_.use) {
PGLFieldArguments field_args;
switch (guiding_params_.type) {
default:
/* Parallax-aware von Mises-Fisher mixture models. */
case GUIDING_TYPE_PARALLAX_AWARE_VMM: {
pglFieldArgumentsSetDefaults(
field_args,
PGL_SPATIAL_STRUCTURE_TYPE::PGL_SPATIAL_STRUCTURE_KDTREE,
PGL_DIRECTIONAL_DISTRIBUTION_TYPE::PGL_DIRECTIONAL_DISTRIBUTION_PARALLAX_AWARE_VMM);
break;
}
/* Directional quad-trees. */
case GUIDING_TYPE_DIRECTIONAL_QUAD_TREE: {
pglFieldArgumentsSetDefaults(
field_args,
PGL_SPATIAL_STRUCTURE_TYPE::PGL_SPATIAL_STRUCTURE_KDTREE,
PGL_DIRECTIONAL_DISTRIBUTION_TYPE::PGL_DIRECTIONAL_DISTRIBUTION_QUADTREE);
break;
}
/* von Mises-Fisher mixture models. */
case GUIDING_TYPE_VMM: {
pglFieldArgumentsSetDefaults(
field_args,
PGL_SPATIAL_STRUCTURE_TYPE::PGL_SPATIAL_STRUCTURE_KDTREE,
PGL_DIRECTIONAL_DISTRIBUTION_TYPE::PGL_DIRECTIONAL_DISTRIBUTION_VMM);
break;
}
}
# if OPENPGL_VERSION_MINOR >= 4
field_args.deterministic = guiding_params.deterministic;
# endif
openpgl::cpp::Device *guiding_device = static_cast<openpgl::cpp::Device *>(
device_->get_guiding_device());
if (guiding_device) {
guiding_sample_data_storage_ = make_unique<openpgl::cpp::SampleStorage>();
guiding_field_ = make_unique<openpgl::cpp::Field>(guiding_device, field_args);
}
else {
guiding_sample_data_storage_ = nullptr;
guiding_field_ = nullptr;
}
}
else {
guiding_sample_data_storage_ = nullptr;
guiding_field_ = nullptr;
}
}
else if (reset) {
if (guiding_field_) {
guiding_field_->Reset();
}
}
#else
(void)guiding_params;
(void)reset;
#endif
}
void PathTrace::guiding_prepare_structures()
{
#ifdef WITH_PATH_GUIDING
const bool train = (guiding_params_.training_samples == 0) ||
(guiding_field_->GetIteration() < guiding_params_.training_samples);
for (auto &&path_trace_work : path_trace_works_) {
path_trace_work->guiding_init_kernel_globals(
guiding_field_.get(), guiding_sample_data_storage_.get(), train);
}
if (train) {
/* For training the guiding distribution we need to force the number of samples
* per update to be limited, for reproducible results and reasonable training size.
*
* Idea: we could stochastically discard samples with a probability of 1/num_samples_per_update
* and then update only after num_samples_per_update iterations have been rendered. */
render_scheduler_.set_limit_samples_per_update(4);
}
else {
render_scheduler_.set_limit_samples_per_update(0);
}
#endif
}
void PathTrace::guiding_update_structures()
{
#ifdef WITH_PATH_GUIDING
VLOG_WORK << "Update path guiding structures";
VLOG_DEBUG << "Number of surface samples: " << guiding_sample_data_storage_->GetSizeSurface();
VLOG_DEBUG << "Number of volume samples: " << guiding_sample_data_storage_->GetSizeVolume();
const size_t num_valid_samples = guiding_sample_data_storage_->GetSizeSurface() +
guiding_sample_data_storage_->GetSizeVolume();
/* Wait until we have at least 1024 samples. */
if (num_valid_samples >= 1024) {
# if OPENPGL_VERSION_MINOR < 4
const size_t num_samples = 1;
guiding_field_->Update(*guiding_sample_data_storage_, num_samples);
# else
guiding_field_->Update(*guiding_sample_data_storage_);
# endif
guiding_update_count++;
VLOG_DEBUG << "Path guiding field valid: " << guiding_field_->Validate();
guiding_sample_data_storage_->Clear();
}
#endif
}
CCL_NAMESPACE_END
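
The guiding code above gates training and field updates on three conditions: training continues while `GetIteration()` is below `training_samples` (or indefinitely if that value is 0), the scheduler is limited to 4 samples per update while training, and the field is only rebuilt once at least 1024 samples have been collected. The following self-contained sketch mirrors that gating with plain integers; the loop, counters, and per-progression sample counts are made up for illustration and do not use OpenPGL.

```cpp
// Sketch of the update gating: keep training while the field has fewer iterations than
// training_samples, cap samples per progression during training, and only rebuild the
// field once enough samples were collected.
#include <cstddef>
#include <iostream>

int main()
{
  const int training_samples = 128;  /* 0 would mean "train forever" */
  int field_iteration = 0;           /* stand-in for guiding_field_->GetIteration() */
  std::size_t collected_samples = 0; /* stand-in for the SampleStorage size */

  for (int progression = 0; progression < 8; progression++) {
    const bool train = (training_samples == 0) || (field_iteration < training_samples);
    const int samples_per_update = train ? 4 : 0; /* 0 = no limit */

    collected_samples += 400; /* pretend each progression produced some samples */

    /* Mirror of the 1024-sample threshold before updating the field. */
    if (train && collected_samples >= 1024) {
      field_iteration++;
      collected_samples = 0;
      std::cout << "update guiding field, iteration " << field_iteration << "\n";
    }
    (void)samples_per_update;
  }
  return 0;
}
```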

View File

@@ -4,15 +4,11 @@
#pragma once
#include "integrator/denoiser.h"
#include "integrator/guiding.h"
#include "integrator/pass_accessor.h"
#include "integrator/path_trace_work.h"
#include "integrator/work_balancer.h"
#include "session/buffers.h"
#include "util/function.h"
#include "util/guiding.h"
#include "util/thread.h"
#include "util/unique_ptr.h"
#include "util/vector.h"
@@ -93,10 +89,6 @@ class PathTrace {
* Use this to configure the adaptive sampler before rendering any samples. */
void set_adaptive_sampling(const AdaptiveSampling &adaptive_sampling);
/* Set the parameters for guiding.
* Use this to set up the guiding structures before each rendering iteration. */
void set_guiding_params(const GuidingParams &params, const bool reset);
/* Sets output driver for render buffer output. */
void set_output_driver(unique_ptr<OutputDriver> driver);
@@ -213,15 +205,6 @@ class PathTrace {
void write_tile_buffer(const RenderWork &render_work);
void finalize_full_buffer_on_disk(const RenderWork &render_work);
/* Updates/initializes the guiding structures after a rendering iteration.
* The structures are updated using the training data/samples generated during the previous
* rendering iteration. */
void guiding_update_structures();
/* Prepares the per-kernel thread related guiding structures (e.g., PathSegmentStorage,
* pointers to the global Field and SegmentStorage). */
void guiding_prepare_structures();
/* Get number of samples in the current state of the render buffers. */
int get_num_samples_in_buffer();
@@ -282,22 +265,6 @@ class PathTrace {
/* Denoiser device descriptor which holds the denoised big tile for multi-device workloads. */
unique_ptr<PathTraceWork> big_tile_denoise_work_;
#ifdef WITH_PATH_GUIDING
/* Guiding related attributes */
GuidingParams guiding_params_;
/* The guiding field which holds the representation of the incident radiance field for the
* complete scene. */
unique_ptr<openpgl::cpp::Field> guiding_field_;
/* The storage container which holds the training data/samples generated during the last
* rendering iteration. */
unique_ptr<openpgl::cpp::SampleStorage> guiding_sample_data_storage_;
/* The number of already performed training iterations for the guiding field.*/
int guiding_update_count = 0;
#endif
/* State which is common for all the steps of the render work.
* Is brought up to date in the `render()` call and is accessed from all the steps involved into
* rendering the work. */

View File

@@ -140,13 +140,6 @@ class PathTraceWork {
return device_;
}
#ifdef WITH_PATH_GUIDING
/* Initializes the per-thread guiding kernel data. */
virtual void guiding_init_kernel_globals(void *, void *, const bool)
{
}
#endif
protected:
PathTraceWork(Device *device,
Film *film,

View File

@@ -6,7 +6,6 @@
#include "device/cpu/kernel.h"
#include "device/device.h"
#include "kernel/film/write.h"
#include "kernel/integrator/path_state.h"
#include "integrator/pass_accessor_cpu.h"
@@ -146,13 +145,6 @@ void PathTraceWorkCPU::render_samples_full_pipeline(KernelGlobalsCPU *kernel_glo
kernels_.integrator_megakernel(kernel_globals, state, render_buffer);
#ifdef WITH_PATH_GUIDING
if (kernel_globals->data.integrator.train_guiding) {
/* Push the generated sample data to the global sample data storage. */
guiding_push_sample_data_to_global_storage(kernel_globals, state, render_buffer);
}
#endif
if (shadow_catcher_state) {
kernels_.integrator_megakernel(kernel_globals, shadow_catcher_state, render_buffer);
}
@@ -284,106 +276,4 @@ void PathTraceWorkCPU::cryptomatte_postproces()
});
}
#ifdef WITH_PATH_GUIDING
/* Note: It seems that this is called before every rendering iteration/progression and not once per
* rendering. Maybe we can find a way to call it only once per rendering. */
void PathTraceWorkCPU::guiding_init_kernel_globals(void *guiding_field,
void *sample_data_storage,
const bool train)
{
/* Linking the global guiding structures (e.g., Field and SampleStorage) to the per-thread
* kernel globals. */
for (int thread_index = 0; thread_index < kernel_thread_globals_.size(); thread_index++) {
CPUKernelThreadGlobals &kg = kernel_thread_globals_[thread_index];
openpgl::cpp::Field *field = (openpgl::cpp::Field *)guiding_field;
/* Allocate sampling distributions. */
kg.opgl_guiding_field = field;
# if PATH_GUIDING_LEVEL >= 4
if (kg.opgl_surface_sampling_distribution) {
delete kg.opgl_surface_sampling_distribution;
kg.opgl_surface_sampling_distribution = nullptr;
}
if (kg.opgl_volume_sampling_distribution) {
delete kg.opgl_volume_sampling_distribution;
kg.opgl_volume_sampling_distribution = nullptr;
}
if (field) {
kg.opgl_surface_sampling_distribution = new openpgl::cpp::SurfaceSamplingDistribution(field);
kg.opgl_volume_sampling_distribution = new openpgl::cpp::VolumeSamplingDistribution(field);
}
# endif
/* Reserve storage for training. */
kg.data.integrator.train_guiding = train;
kg.opgl_sample_data_storage = (openpgl::cpp::SampleStorage *)sample_data_storage;
if (train) {
kg.opgl_path_segment_storage->Reserve(kg.data.integrator.transparent_max_bounce +
kg.data.integrator.max_bounce + 3);
kg.opgl_path_segment_storage->Clear();
}
}
}
void PathTraceWorkCPU::guiding_push_sample_data_to_global_storage(
KernelGlobalsCPU *kg, IntegratorStateCPU *state, ccl_global float *ccl_restrict render_buffer)
{
# ifdef WITH_CYCLES_DEBUG
if (VLOG_WORK_IS_ON) {
/* Check if the generated path segments contain valid values. */
const bool validSegments = kg->opgl_path_segment_storage->ValidateSegments();
if (!validSegments) {
VLOG_WORK << "Guiding: invalid path segments!";
}
}
/* Write debug render pass to validate it matches combined pass. */
pgl_vec3f pgl_final_color = kg->opgl_path_segment_storage->CalculatePixelEstimate(false);
const uint32_t render_pixel_index = INTEGRATOR_STATE(state, path, render_pixel_index);
const uint64_t render_buffer_offset = (uint64_t)render_pixel_index *
kernel_data.film.pass_stride;
ccl_global float *buffer = render_buffer + render_buffer_offset;
float3 final_color = make_float3(pgl_final_color.x, pgl_final_color.y, pgl_final_color.z);
if (kernel_data.film.pass_guiding_color != PASS_UNUSED) {
film_write_pass_float3(buffer + kernel_data.film.pass_guiding_color, final_color);
}
# else
(void)state;
(void)render_buffer;
# endif
/* Convert the path segment representation of the random walk into radiance samples. */
# if PATH_GUIDING_LEVEL >= 2
const bool use_direct_light = kernel_data.integrator.use_guiding_direct_light;
const bool use_mis_weights = kernel_data.integrator.use_guiding_mis_weights;
kg->opgl_path_segment_storage->PrepareSamples(
false, nullptr, use_mis_weights, use_direct_light, false);
# endif
# ifdef WITH_CYCLES_DEBUG
/* Check if the training/radiance samples generated by the path segment storage are valid. */
if (VLOG_WORK_IS_ON) {
const bool validSamples = kg->opgl_path_segment_storage->ValidateSamples();
if (!validSamples) {
VLOG_WORK
<< "Guiding: path segment storage generated/contains invalid radiance/training samples!";
}
}
# endif
# if PATH_GUIDING_LEVEL >= 3
/* Push radiance samples from current random walk/path to the global sample storage. */
size_t num_samples = 0;
const openpgl::cpp::SampleData *samples = kg->opgl_path_segment_storage->GetSamples(num_samples);
kg->opgl_sample_data_storage->AddSamples(samples, num_samples);
# endif
/* Clear storage for the current path, to be ready for the next path. */
kg->opgl_path_segment_storage->Clear();
}
#endif
CCL_NAMESPACE_END
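
`guiding_init_kernel_globals()` above rebuilds the per-thread sampling distributions whenever the global field changes and reserves the path-segment storage for the longest possible path. The sketch below reproduces that ownership pattern with plain C++ types; `Field`, `SamplingDistribution`, and `SegmentStorage` are dummy stand-ins for the OpenPGL classes.

```cpp
// Sketch: per-thread sampling distributions are rebuilt against the current field,
// and the segment storage is reserved based on the maximum path length.
#include <memory>
#include <vector>

struct Field { }; /* stand-in for openpgl::cpp::Field */
struct SamplingDistribution {
  explicit SamplingDistribution(Field *) {}
};
struct SegmentStorage {
  void Reserve(int) {}
  void Clear() {}
};

struct ThreadGlobals {
  Field *field = nullptr;
  std::unique_ptr<SamplingDistribution> surface_dist;
  std::unique_ptr<SamplingDistribution> volume_dist;
  SegmentStorage segments;
};

void init_thread_globals(std::vector<ThreadGlobals> &threads, Field *field,
                         int max_bounce, int transparent_max_bounce, bool train)
{
  for (ThreadGlobals &tg : threads) {
    tg.field = field;
    /* Drop distributions built against a previous field before creating new ones. */
    tg.surface_dist.reset(field ? new SamplingDistribution(field) : nullptr);
    tg.volume_dist.reset(field ? new SamplingDistribution(field) : nullptr);
    if (train) {
      tg.segments.Reserve(transparent_max_bounce + max_bounce + 3);
      tg.segments.Clear();
    }
  }
}

int main()
{
  Field field;
  std::vector<ThreadGlobals> threads(4);
  init_thread_globals(threads, &field, 12, 8, true);
  return 0;
}
```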

View File

@@ -16,7 +16,6 @@ CCL_NAMESPACE_BEGIN
struct KernelWorkTile;
struct KernelGlobalsCPU;
struct IntegratorStateCPU;
class CPUKernels;
@@ -51,22 +50,6 @@ class PathTraceWorkCPU : public PathTraceWork {
virtual int adaptive_sampling_converge_filter_count_active(float threshold, bool reset) override;
virtual void cryptomatte_postproces() override;
#ifdef WITH_PATH_GUIDING
/* Initializes the per-thread guiding kernel data. The function sets the pointers to the
* global guiding field and the sample data storage, as well as initializes the per-thread
* guided sampling distributions (e.g., SurfaceSamplingDistribution and
* VolumeSamplingDistribution). */
void guiding_init_kernel_globals(void *guiding_field,
void *sample_data_storage,
const bool train) override;
/* Pushes the collected training data/samples of a path to the global sample storage.
* This function is called at the end of a random walk/path generation. */
void guiding_push_sample_data_to_global_storage(KernelGlobalsCPU *kernel_globals,
IntegratorStateCPU *state,
ccl_global float *ccl_restrict render_buffer);
#endif
protected:
/* Core path tracing routine. Renders given work time on the given queue. */
void render_samples_full_pipeline(KernelGlobalsCPU *kernel_globals,

View File

@@ -45,11 +45,6 @@ void RenderScheduler::set_denoiser_params(const DenoiseParams &params)
denoiser_params_ = params;
}
void RenderScheduler::set_limit_samples_per_update(const int limit_samples)
{
limit_samples_per_update_ = limit_samples;
}
void RenderScheduler::set_adaptive_sampling(const AdaptiveSampling &adaptive_sampling)
{
adaptive_sampling_ = adaptive_sampling;
@@ -765,13 +760,7 @@ int RenderScheduler::calculate_num_samples_per_update() const
const double update_interval_in_seconds = guess_display_update_interval_in_seconds();
int num_samples_per_update = max(int(num_samples_in_second * update_interval_in_seconds), 1);
if (limit_samples_per_update_) {
num_samples_per_update = min(limit_samples_per_update_, num_samples_per_update);
}
return num_samples_per_update;
return max(int(num_samples_in_second * update_interval_in_seconds), 1);
}
int RenderScheduler::get_start_sample_to_path_trace() const
@@ -819,7 +808,7 @@ int RenderScheduler::get_num_samples_to_path_trace() const
return 1;
}
int num_samples_per_update = calculate_num_samples_per_update();
const int num_samples_per_update = calculate_num_samples_per_update();
const int path_trace_start_sample = get_start_sample_to_path_trace();
/* Round number of samples to a power of two, so that division of path states into tiles goes in

View File

@@ -187,8 +187,6 @@ class RenderScheduler {
* times, and so on. */
string full_report() const;
void set_limit_samples_per_update(const int limit_samples);
protected:
/* Check whether all work has been scheduled and time limit was not exceeded.
*
@@ -452,10 +450,6 @@ class RenderScheduler {
* (quadratic dependency from the resolution divider): resolution divider of 2 brings render time
* down by a factor of 4. */
int calculate_resolution_divider_for_time(double desired_time, double actual_time);
/* Limit for the number of samples per rendering progression, used when path guiding
* is activated or still inside its training phase. */
int limit_samples_per_update_ = 0;
};
int calculate_resolution_divider_for_resolution(int width, int height, int resolution);

View File

@@ -243,7 +243,6 @@ set(SRC_KERNEL_INTEGRATOR_HEADERS
integrator/intersect_shadow.h
integrator/intersect_subsurface.h
integrator/intersect_volume_stack.h
integrator/guiding.h
integrator/megakernel.h
integrator/mnee.h
integrator/path_state.h
@@ -530,7 +529,7 @@ if(WITH_CYCLES_CUDA_BINARIES)
endif()
if(DEFINED cuda_nvcc_executable AND DEFINED cuda_toolkit_root_dir)
# Compile regular kernel
cycles_cuda_kernel_add(${arch} ${prev_arch} kernel "" "${cuda_sources}" FALSE)
CYCLES_CUDA_KERNEL_ADD(${arch} ${prev_arch} kernel "" "${cuda_sources}" FALSE)
if(WITH_CYCLES_CUDA_BUILD_SERIAL)
set(prev_arch ${arch})
@@ -545,6 +544,8 @@ if(WITH_CYCLES_CUDA_BINARIES)
cycles_set_solution_folder(cycles_kernel_cuda)
endif()
####################################################### START
# HIP module
if(WITH_CYCLES_HIP_BINARIES AND WITH_CYCLES_DEVICE_HIP)
@@ -612,13 +613,14 @@ if(WITH_CYCLES_HIP_BINARIES AND WITH_CYCLES_DEVICE_HIP)
foreach(arch ${CYCLES_HIP_BINARIES_ARCH})
# Compile regular kernel
cycles_hip_kernel_add(${arch} kernel "" "${hip_sources}" FALSE)
CYCLES_HIP_KERNEL_ADD(${arch} kernel "" "${hip_sources}" FALSE)
endforeach()
add_custom_target(cycles_kernel_hip ALL DEPENDS ${hip_fatbins})
cycles_set_solution_folder(cycles_kernel_hip)
endif()
####################################################### END
# OptiX PTX modules
if(WITH_CYCLES_DEVICE_OPTIX AND WITH_CYCLES_CUDA_BINARIES)
@@ -697,11 +699,11 @@ if(WITH_CYCLES_DEVICE_OPTIX AND WITH_CYCLES_CUDA_BINARIES)
delayed_install("${CMAKE_CURRENT_BINARY_DIR}" "${output}" ${CYCLES_INSTALL_PATH}/lib)
endmacro()
cycles_optix_kernel_add(
CYCLES_OPTIX_KERNEL_ADD(
kernel_optix
"device/optix/kernel.cu"
"")
cycles_optix_kernel_add(
CYCLES_OPTIX_KERNEL_ADD(
kernel_optix_shader_raytrace
"device/optix/kernel_shader_raytrace.cu"
"--keep-device-functions")
@@ -710,8 +712,6 @@ if(WITH_CYCLES_DEVICE_OPTIX AND WITH_CYCLES_CUDA_BINARIES)
cycles_set_solution_folder(cycles_kernel_optix)
endif()
# oneAPI module
if(WITH_CYCLES_DEVICE_ONEAPI)
if(WIN32)
set(cycles_kernel_oneapi_lib ${CMAKE_CURRENT_BINARY_DIR}/cycles_kernel_oneapi.dll)
@@ -793,7 +793,7 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
if(UNIX AND NOT APPLE)
if(NOT WITH_CXX11_ABI)
check_library_exists(sycl
_ZN4sycl3_V17handler22verifyUsedKernelBundleERKSs ${sycl_compiler_root}/../lib SYCL_NO_CXX11_ABI)
_ZN2cl4sycl7handler22verifyUsedKernelBundleERKSs ${sycl_compiler_root}/../lib SYCL_NO_CXX11_ABI)
if(SYCL_NO_CXX11_ABI)
list(APPEND sycl_compiler_flags -D_GLIBCXX_USE_CXX11_ABI=0)
endif()
@@ -951,8 +951,8 @@ endif()
# Warnings to avoid using doubles in the kernel.
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_C_COMPILER_ID MATCHES "Clang")
add_check_cxx_compiler_flag(CMAKE_CXX_FLAGS _has_cxxflag_float_conversion "-Werror=float-conversion")
add_check_cxx_compiler_flag(CMAKE_CXX_FLAGS _has_cxxflag_double_promotion "-Werror=double-promotion")
ADD_CHECK_CXX_COMPILER_FLAG(CMAKE_CXX_FLAGS _has_cxxflag_float_conversion "-Werror=float-conversion")
ADD_CHECK_CXX_COMPILER_FLAG(CMAKE_CXX_FLAGS _has_cxxflag_double_promotion "-Werror=double-promotion")
unset(_has_cxxflag_float_conversion)
unset(_has_cxxflag_double_promotion)
endif()

View File

@@ -53,16 +53,13 @@ ccl_device_inline ccl_private ShaderClosure *bsdf_alloc(ccl_private ShaderData *
{
kernel_assert(isfinite_safe(weight));
/* No negative weights allowed. */
weight = max(weight, zero_float3());
const float sample_weight = fabsf(average(weight));
/* Use the comparison this way to help deal with a non-finite weight: if the average is not finite
* we will not allocate a new closure. */
if (sample_weight >= CLOSURE_WEIGHT_CUTOFF) {
ccl_private ShaderClosure *sc = closure_alloc(sd, size, CLOSURE_NONE_ID, weight);
if (!sc) {
if (sc == NULL) {
return NULL;
}
@@ -74,4 +71,34 @@ ccl_device_inline ccl_private ShaderClosure *bsdf_alloc(ccl_private ShaderData *
return NULL;
}
#ifdef __OSL__
ccl_device_inline ShaderClosure *bsdf_alloc_osl(ShaderData *sd,
int size,
Spectrum weight,
void *data)
{
kernel_assert(isfinite_safe(weight));
const float sample_weight = fabsf(average(weight));
/* Use the comparison this way to help deal with a non-finite weight: if the average is not finite
* we will not allocate a new closure. */
if (sample_weight >= CLOSURE_WEIGHT_CUTOFF) {
ShaderClosure *sc = closure_alloc(sd, size, CLOSURE_NONE_ID, weight);
if (!sc) {
return NULL;
}
memcpy((void *)sc, data, size);
sc->weight = weight;
sc->sample_weight = sample_weight;
return sc;
}
return NULL;
}
#endif
CCL_NAMESPACE_END
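
The comment in `bsdf_alloc()`/`bsdf_alloc_osl()` about writing the test as `sample_weight >= CLOSURE_WEIGHT_CUTOFF` relies on the fact that any comparison with NaN evaluates to false, so a non-finite average weight silently skips the allocation. A tiny demonstration, with a placeholder value for the cutoff:

```cpp
// Sketch: a ">=" comparison is false for NaN, so a non-finite weight never allocates.
#include <cmath>
#include <iostream>

int main()
{
  const float cutoff = 1e-5f; /* stand-in value for CLOSURE_WEIGHT_CUTOFF */
  const float weights[] = {0.5f, 0.0f, std::nanf("")};

  for (float w : weights) {
    const bool allocate = (w >= cutoff); /* false for NaN, so the closure is skipped */
    std::cout << w << " -> " << (allocate ? "allocate" : "skip") << "\n";
  }
  return 0;
}
```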

View File

@@ -69,11 +69,7 @@ ccl_device_inline float bsdf_get_roughness_squared(ccl_private const ShaderClosu
* Yining Karl Li and Brent Burley. */
ccl_device_inline float bump_shadowing_term(float3 Ng, float3 N, float3 I)
{
const float cosNI = dot(N, I);
if (cosNI < 0.0f) {
Ng = -Ng;
}
float g = safe_divide(dot(Ng, I), cosNI * dot(Ng, N));
float g = safe_divide(dot(Ng, I), dot(N, I) * dot(Ng, N));
/* If the incoming light is on the unshadowed side, return full brightness. */
if (g >= 1.0f) {
@@ -102,12 +98,6 @@ ccl_device_inline float shift_cos_in(float cos_in, const float frequency_multipl
return val;
}
ccl_device_inline bool bsdf_is_transmission(ccl_private const ShaderClosure *sc,
const float3 omega_in)
{
return dot(sc->N, omega_in) < 0.0f;
}
ccl_device_inline int bsdf_sample(KernelGlobals kg,
ccl_private ShaderData *sd,
ccl_private const ShaderClosure *sc,
@@ -115,9 +105,7 @@ ccl_device_inline int bsdf_sample(KernelGlobals kg,
float randv,
ccl_private Spectrum *eval,
ccl_private float3 *omega_in,
ccl_private float *pdf,
ccl_private float2 *sampled_roughness,
ccl_private float *eta)
ccl_private float *pdf)
{
/* For curves use the smooth normal, particularly for ribbons the geometric
* normal gives too much darkening otherwise. */
@@ -127,131 +115,78 @@ ccl_device_inline int bsdf_sample(KernelGlobals kg,
switch (sc->type) {
case CLOSURE_BSDF_DIFFUSE_ID:
label = bsdf_diffuse_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
*sampled_roughness = one_float2();
*eta = 1.0f;
break;
#if defined(__SVM__) || defined(__OSL__)
#ifdef __SVM__
case CLOSURE_BSDF_OREN_NAYAR_ID:
label = bsdf_oren_nayar_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
*sampled_roughness = one_float2();
*eta = 1.0f;
break;
# ifdef __OSL__
case CLOSURE_BSDF_PHONG_RAMP_ID:
label = bsdf_phong_ramp_sample(
sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness);
*eta = 1.0f;
label = bsdf_phong_ramp_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
break;
case CLOSURE_BSDF_DIFFUSE_RAMP_ID:
label = bsdf_diffuse_ramp_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
*sampled_roughness = one_float2();
*eta = 1.0f;
break;
# endif
case CLOSURE_BSDF_TRANSLUCENT_ID:
label = bsdf_translucent_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
*sampled_roughness = one_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_REFLECTION_ID:
label = bsdf_reflection_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, eta);
*sampled_roughness = zero_float2();
label = bsdf_reflection_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
break;
case CLOSURE_BSDF_REFRACTION_ID:
label = bsdf_refraction_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, eta);
*sampled_roughness = zero_float2();
label = bsdf_refraction_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
break;
case CLOSURE_BSDF_TRANSPARENT_ID:
label = bsdf_transparent_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
*sampled_roughness = zero_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_MICROFACET_GGX_ID:
case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID:
label = bsdf_microfacet_ggx_sample(
kg, sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness, eta);
label = bsdf_microfacet_ggx_sample(kg, sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
break;
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID:
label = bsdf_microfacet_multi_ggx_sample(kg,
sc,
Ng,
sd->I,
randu,
randv,
eval,
omega_in,
pdf,
&sd->lcg_state,
sampled_roughness,
eta);
label = bsdf_microfacet_multi_ggx_sample(
kg, sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, &sd->lcg_state);
break;
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID:
label = bsdf_microfacet_multi_ggx_glass_sample(kg,
sc,
Ng,
sd->I,
randu,
randv,
eval,
omega_in,
pdf,
&sd->lcg_state,
sampled_roughness,
eta);
label = bsdf_microfacet_multi_ggx_glass_sample(
kg, sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, &sd->lcg_state);
break;
case CLOSURE_BSDF_MICROFACET_BECKMANN_ID:
case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID:
label = bsdf_microfacet_beckmann_sample(
kg, sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness, eta);
kg, sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
break;
case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID:
label = bsdf_ashikhmin_shirley_sample(
sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness);
*eta = 1.0f;
label = bsdf_ashikhmin_shirley_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
break;
case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID:
label = bsdf_ashikhmin_velvet_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
*sampled_roughness = one_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_DIFFUSE_TOON_ID:
label = bsdf_diffuse_toon_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
*sampled_roughness = one_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_GLOSSY_TOON_ID:
label = bsdf_glossy_toon_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
// double check if this is valid
*sampled_roughness = one_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_HAIR_REFLECTION_ID:
label = bsdf_hair_reflection_sample(
sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness);
*eta = 1.0f;
label = bsdf_hair_reflection_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_TRANSMISSION_ID:
label = bsdf_hair_transmission_sample(
sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness);
*eta = 1.0f;
label = bsdf_hair_transmission_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_PRINCIPLED_ID:
label = bsdf_principled_hair_sample(
kg, sc, sd, randu, randv, eval, omega_in, pdf, sampled_roughness, eta);
label = bsdf_principled_hair_sample(kg, sc, sd, randu, randv, eval, omega_in, pdf);
break;
case CLOSURE_BSDF_PRINCIPLED_DIFFUSE_ID:
label = bsdf_principled_diffuse_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
*sampled_roughness = one_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_PRINCIPLED_SHEEN_ID:
label = bsdf_principled_sheen_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf);
*sampled_roughness = one_float2();
*eta = 1.0f;
break;
#endif
default:
@@ -274,12 +209,11 @@ ccl_device_inline int bsdf_sample(KernelGlobals kg,
const float frequency_multiplier =
kernel_data_fetch(objects, sd->object).shadow_terminator_shading_offset;
if (frequency_multiplier > 1.0f) {
const float cosNI = dot(*omega_in, sc->N);
*eval *= shift_cos_in(cosNI, frequency_multiplier);
*eval *= shift_cos_in(dot(*omega_in, sc->N), frequency_multiplier);
}
if (label & LABEL_DIFFUSE) {
if (!isequal(sc->N, sd->N)) {
*eval *= bump_shadowing_term(sd->N, sc->N, *omega_in);
*eval *= bump_shadowing_term((label & LABEL_TRANSMIT) ? -sd->N : sd->N, sc->N, *omega_in);
}
}
}
@@ -292,246 +226,6 @@ ccl_device_inline int bsdf_sample(KernelGlobals kg,
return label;
}
ccl_device_inline void bsdf_roughness_eta(const KernelGlobals kg,
ccl_private const ShaderClosure *sc,
ccl_private float2 *roughness,
ccl_private float *eta)
{
bool refractive = false;
float alpha = 1.0f;
switch (sc->type) {
case CLOSURE_BSDF_DIFFUSE_ID:
*roughness = one_float2();
*eta = 1.0f;
break;
#ifdef __SVM__
case CLOSURE_BSDF_OREN_NAYAR_ID:
*roughness = one_float2();
*eta = 1.0f;
break;
# ifdef __OSL__
case CLOSURE_BSDF_PHONG_RAMP_ID:
alpha = phong_ramp_exponent_to_roughness(((ccl_private const PhongRampBsdf *)sc)->exponent);
*roughness = make_float2(alpha, alpha);
*eta = 1.0f;
break;
case CLOSURE_BSDF_DIFFUSE_RAMP_ID:
*roughness = one_float2();
*eta = 1.0f;
break;
# endif
case CLOSURE_BSDF_TRANSLUCENT_ID:
*roughness = one_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_REFLECTION_ID: {
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
*roughness = zero_float2();
*eta = bsdf->ior;
break;
}
case CLOSURE_BSDF_REFRACTION_ID: {
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
*roughness = zero_float2();
// do we need to inverse eta??
*eta = bsdf->ior;
break;
}
case CLOSURE_BSDF_TRANSPARENT_ID:
*roughness = zero_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_MICROFACET_GGX_ID:
case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID: {
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
*roughness = make_float2(bsdf->alpha_x, bsdf->alpha_y);
refractive = bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID;
*eta = refractive ? 1.0f / bsdf->ior : bsdf->ior;
break;
}
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID: {
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
*roughness = make_float2(bsdf->alpha_x, bsdf->alpha_y);
*eta = bsdf->ior;
break;
}
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID: {
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
*roughness = make_float2(bsdf->alpha_x, bsdf->alpha_y);
*eta = bsdf->ior;
break;
}
case CLOSURE_BSDF_MICROFACET_BECKMANN_ID:
case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID: {
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
*roughness = make_float2(bsdf->alpha_x, bsdf->alpha_y);
refractive = bsdf->type == CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID;
*eta = refractive ? 1.0f / bsdf->ior : bsdf->ior;
} break;
case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID: {
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
*roughness = make_float2(bsdf->alpha_x, bsdf->alpha_y);
*eta = 1.0f;
break;
}
case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID:
*roughness = one_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_DIFFUSE_TOON_ID:
*roughness = one_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_GLOSSY_TOON_ID:
// double check if this is valid
*roughness = one_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_HAIR_REFLECTION_ID:
*roughness = make_float2(((ccl_private HairBsdf *)sc)->roughness1,
((ccl_private HairBsdf *)sc)->roughness2);
*eta = 1.0f;
break;
case CLOSURE_BSDF_HAIR_TRANSMISSION_ID:
*roughness = make_float2(((ccl_private HairBsdf *)sc)->roughness1,
((ccl_private HairBsdf *)sc)->roughness2);
*eta = 1.0f;
break;
case CLOSURE_BSDF_HAIR_PRINCIPLED_ID:
alpha = ((ccl_private PrincipledHairBSDF *)sc)->m0_roughness;
*roughness = make_float2(alpha, alpha);
*eta = ((ccl_private PrincipledHairBSDF *)sc)->eta;
break;
case CLOSURE_BSDF_PRINCIPLED_DIFFUSE_ID:
*roughness = one_float2();
*eta = 1.0f;
break;
case CLOSURE_BSDF_PRINCIPLED_SHEEN_ID:
*roughness = one_float2();
*eta = 1.0f;
break;
#endif
default:
*roughness = one_float2();
*eta = 1.0f;
break;
}
}
ccl_device_inline int bsdf_label(const KernelGlobals kg,
ccl_private const ShaderClosure *sc,
const float3 omega_in)
{
/* For curves use the smooth normal, particularly for ribbons the geometric
* normal gives too much darkening otherwise. */
int label;
switch (sc->type) {
case CLOSURE_BSDF_DIFFUSE_ID:
case CLOSURE_BSSRDF_BURLEY_ID:
case CLOSURE_BSSRDF_RANDOM_WALK_ID:
case CLOSURE_BSSRDF_RANDOM_WALK_FIXED_RADIUS_ID:
label = LABEL_REFLECT | LABEL_DIFFUSE;
break;
#ifdef __SVM__
case CLOSURE_BSDF_OREN_NAYAR_ID:
label = LABEL_REFLECT | LABEL_DIFFUSE;
break;
# ifdef __OSL__
case CLOSURE_BSDF_PHONG_RAMP_ID:
label = LABEL_REFLECT | LABEL_GLOSSY;
break;
case CLOSURE_BSDF_DIFFUSE_RAMP_ID:
label = LABEL_REFLECT | LABEL_DIFFUSE;
break;
# endif
case CLOSURE_BSDF_TRANSLUCENT_ID:
label = LABEL_TRANSMIT | LABEL_DIFFUSE;
break;
case CLOSURE_BSDF_REFLECTION_ID:
label = LABEL_REFLECT | LABEL_SINGULAR;
break;
case CLOSURE_BSDF_REFRACTION_ID:
label = LABEL_TRANSMIT | LABEL_SINGULAR;
break;
case CLOSURE_BSDF_TRANSPARENT_ID:
label = LABEL_TRANSMIT | LABEL_TRANSPARENT;
break;
case CLOSURE_BSDF_MICROFACET_GGX_ID:
case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID:
case CLOSURE_BSDF_MICROFACET_BECKMANN_ID: {
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
label = (bsdf->alpha_x * bsdf->alpha_y <= 1e-7f) ? LABEL_REFLECT | LABEL_SINGULAR :
LABEL_REFLECT | LABEL_GLOSSY;
break;
}
case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID:
case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID: {
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
label = (bsdf->alpha_x * bsdf->alpha_y <= 1e-7f) ? LABEL_TRANSMIT | LABEL_SINGULAR :
LABEL_TRANSMIT | LABEL_GLOSSY;
break;
}
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID:
label = (bsdf_is_transmission(sc, omega_in)) ? LABEL_TRANSMIT | LABEL_GLOSSY :
LABEL_REFLECT | LABEL_GLOSSY;
break;
case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID:
label = LABEL_REFLECT | LABEL_GLOSSY;
break;
case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID:
label = LABEL_REFLECT | LABEL_DIFFUSE;
break;
case CLOSURE_BSDF_DIFFUSE_TOON_ID:
label = LABEL_REFLECT | LABEL_DIFFUSE;
break;
case CLOSURE_BSDF_GLOSSY_TOON_ID:
label = LABEL_REFLECT | LABEL_GLOSSY;
break;
case CLOSURE_BSDF_HAIR_REFLECTION_ID:
label = LABEL_REFLECT | LABEL_GLOSSY;
break;
case CLOSURE_BSDF_HAIR_TRANSMISSION_ID:
label = LABEL_TRANSMIT | LABEL_GLOSSY;
break;
case CLOSURE_BSDF_HAIR_PRINCIPLED_ID:
if (bsdf_is_transmission(sc, omega_in))
label = LABEL_TRANSMIT | LABEL_GLOSSY;
else
label = LABEL_REFLECT | LABEL_GLOSSY;
break;
case CLOSURE_BSDF_PRINCIPLED_DIFFUSE_ID:
label = LABEL_REFLECT | LABEL_DIFFUSE;
break;
case CLOSURE_BSDF_PRINCIPLED_SHEEN_ID:
label = LABEL_REFLECT | LABEL_DIFFUSE;
break;
#endif
default:
label = LABEL_NONE;
break;
}
/* Test if BSDF sample should be treated as transparent for background. */
if (label & LABEL_TRANSMIT) {
float threshold_squared = kernel_data.background.transparent_roughness_squared_threshold;
if (threshold_squared >= 0.0f) {
if (bsdf_get_specular_roughness_squared(sc) <= threshold_squared) {
label |= LABEL_TRANSMIT_TRANSPARENT;
}
}
}
return label;
}
#ifndef __KERNEL_CUDA__
ccl_device
#else
@@ -542,104 +236,179 @@ ccl_device_inline
ccl_private ShaderData *sd,
ccl_private const ShaderClosure *sc,
const float3 omega_in,
const bool is_transmission,
ccl_private float *pdf)
{
Spectrum eval = zero_spectrum();
switch (sc->type) {
case CLOSURE_BSDF_DIFFUSE_ID:
eval = bsdf_diffuse_eval(sc, sd->I, omega_in, pdf);
break;
#if defined(__SVM__) || defined(__OSL__)
case CLOSURE_BSDF_OREN_NAYAR_ID:
eval = bsdf_oren_nayar_eval(sc, sd->I, omega_in, pdf);
break;
if (!is_transmission) {
switch (sc->type) {
case CLOSURE_BSDF_DIFFUSE_ID:
eval = bsdf_diffuse_eval_reflect(sc, sd->I, omega_in, pdf);
break;
#ifdef __SVM__
case CLOSURE_BSDF_OREN_NAYAR_ID:
eval = bsdf_oren_nayar_eval_reflect(sc, sd->I, omega_in, pdf);
break;
# ifdef __OSL__
case CLOSURE_BSDF_PHONG_RAMP_ID:
eval = bsdf_phong_ramp_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_DIFFUSE_RAMP_ID:
eval = bsdf_diffuse_ramp_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_PHONG_RAMP_ID:
eval = bsdf_phong_ramp_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_DIFFUSE_RAMP_ID:
eval = bsdf_diffuse_ramp_eval_reflect(sc, sd->I, omega_in, pdf);
break;
# endif
case CLOSURE_BSDF_TRANSLUCENT_ID:
eval = bsdf_translucent_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_REFLECTION_ID:
eval = bsdf_reflection_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_REFRACTION_ID:
eval = bsdf_refraction_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_TRANSPARENT_ID:
eval = bsdf_transparent_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_MICROFACET_GGX_ID:
case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID:
eval = bsdf_microfacet_ggx_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID:
eval = bsdf_microfacet_multi_ggx_eval(sc, sd->I, omega_in, pdf, &sd->lcg_state);
break;
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID:
eval = bsdf_microfacet_multi_ggx_glass_eval(sc, sd->I, omega_in, pdf, &sd->lcg_state);
break;
case CLOSURE_BSDF_MICROFACET_BECKMANN_ID:
case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID:
eval = bsdf_microfacet_beckmann_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID:
eval = bsdf_ashikhmin_shirley_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID:
eval = bsdf_ashikhmin_velvet_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_DIFFUSE_TOON_ID:
eval = bsdf_diffuse_toon_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_GLOSSY_TOON_ID:
eval = bsdf_glossy_toon_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_PRINCIPLED_ID:
eval = bsdf_principled_hair_eval(kg, sd, sc, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_REFLECTION_ID:
eval = bsdf_hair_reflection_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_TRANSMISSION_ID:
eval = bsdf_hair_transmission_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_PRINCIPLED_DIFFUSE_ID:
eval = bsdf_principled_diffuse_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_PRINCIPLED_SHEEN_ID:
eval = bsdf_principled_sheen_eval(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_TRANSLUCENT_ID:
eval = bsdf_translucent_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_REFLECTION_ID:
eval = bsdf_reflection_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_REFRACTION_ID:
eval = bsdf_refraction_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_TRANSPARENT_ID:
eval = bsdf_transparent_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_MICROFACET_GGX_ID:
case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID:
eval = bsdf_microfacet_ggx_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID:
eval = bsdf_microfacet_multi_ggx_eval_reflect(sc, sd->I, omega_in, pdf, &sd->lcg_state);
break;
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID:
eval = bsdf_microfacet_multi_ggx_glass_eval_reflect(
sc, sd->I, omega_in, pdf, &sd->lcg_state);
break;
case CLOSURE_BSDF_MICROFACET_BECKMANN_ID:
case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID:
eval = bsdf_microfacet_beckmann_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID:
eval = bsdf_ashikhmin_shirley_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID:
eval = bsdf_ashikhmin_velvet_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_DIFFUSE_TOON_ID:
eval = bsdf_diffuse_toon_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_GLOSSY_TOON_ID:
eval = bsdf_glossy_toon_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_PRINCIPLED_ID:
eval = bsdf_principled_hair_eval(kg, sd, sc, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_REFLECTION_ID:
eval = bsdf_hair_reflection_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_TRANSMISSION_ID:
eval = bsdf_hair_transmission_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_PRINCIPLED_DIFFUSE_ID:
eval = bsdf_principled_diffuse_eval_reflect(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_PRINCIPLED_SHEEN_ID:
eval = bsdf_principled_sheen_eval_reflect(sc, sd->I, omega_in, pdf);
break;
#endif
default:
break;
}
if (CLOSURE_IS_BSDF_DIFFUSE(sc->type)) {
if (!isequal(sc->N, sd->N)) {
eval *= bump_shadowing_term(sd->N, sc->N, omega_in);
}
}
/* Shadow terminator offset. */
const float frequency_multiplier =
kernel_data_fetch(objects, sd->object).shadow_terminator_shading_offset;
if (frequency_multiplier > 1.0f) {
const float cosNI = dot(omega_in, sc->N);
if (cosNI >= 0.0f) {
eval *= shift_cos_in(cosNI, frequency_multiplier);
}
}
}
else {
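/* omega_in is on the transmission side of the geometric normal, so only the
 * *_eval_transmit variants of each closure are evaluated here. */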
switch (sc->type) {
case CLOSURE_BSDF_DIFFUSE_ID:
eval = bsdf_diffuse_eval_transmit(sc, sd->I, omega_in, pdf);
break;
#ifdef __SVM__
case CLOSURE_BSDF_OREN_NAYAR_ID:
eval = bsdf_oren_nayar_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_TRANSLUCENT_ID:
eval = bsdf_translucent_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_REFLECTION_ID:
eval = bsdf_reflection_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_REFRACTION_ID:
eval = bsdf_refraction_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_TRANSPARENT_ID:
eval = bsdf_transparent_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_MICROFACET_GGX_ID:
case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID:
eval = bsdf_microfacet_ggx_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID:
eval = bsdf_microfacet_multi_ggx_eval_transmit(sc, sd->I, omega_in, pdf, &sd->lcg_state);
break;
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID:
eval = bsdf_microfacet_multi_ggx_glass_eval_transmit(
sc, sd->I, omega_in, pdf, &sd->lcg_state);
break;
case CLOSURE_BSDF_MICROFACET_BECKMANN_ID:
case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID:
eval = bsdf_microfacet_beckmann_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID:
eval = bsdf_ashikhmin_shirley_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID:
eval = bsdf_ashikhmin_velvet_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_DIFFUSE_TOON_ID:
eval = bsdf_diffuse_toon_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_GLOSSY_TOON_ID:
eval = bsdf_glossy_toon_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_PRINCIPLED_ID:
eval = bsdf_principled_hair_eval(kg, sd, sc, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_REFLECTION_ID:
eval = bsdf_hair_reflection_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_HAIR_TRANSMISSION_ID:
eval = bsdf_hair_transmission_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_PRINCIPLED_DIFFUSE_ID:
eval = bsdf_principled_diffuse_eval_transmit(sc, sd->I, omega_in, pdf);
break;
case CLOSURE_BSDF_PRINCIPLED_SHEEN_ID:
eval = bsdf_principled_sheen_eval_transmit(sc, sd->I, omega_in, pdf);
break;
#endif
default:
break;
}
if (CLOSURE_IS_BSDF_DIFFUSE(sc->type)) {
if (!isequal(sc->N, sd->N)) {
eval *= bump_shadowing_term(-sd->N, sc->N, omega_in);
}
}
}
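/* Debug-only sanity checks: a BSDF evaluation must never produce a negative pdf
 * or negative spectral components. */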
#ifdef WITH_CYCLES_DEBUG
kernel_assert(*pdf >= 0.0f);
kernel_assert(eval.x >= 0.0f && eval.y >= 0.0f && eval.z >= 0.0f);
@@ -650,7 +419,7 @@ ccl_device_inline
ccl_device void bsdf_blur(KernelGlobals kg, ccl_private ShaderClosure *sc, float roughness)
{
/* TODO: do we want to blur volume closures? */
#if defined(__SVM__) || defined(__OSL__)
#ifdef __SVM__
switch (sc->type) {
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID:
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID:


@@ -39,10 +39,11 @@ ccl_device_inline float bsdf_ashikhmin_shirley_roughness_to_exponent(float rough
return 2.0f / (roughness * roughness) - 2.0f;
}
ccl_device_forceinline Spectrum bsdf_ashikhmin_shirley_eval(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
ccl_device_forceinline Spectrum
bsdf_ashikhmin_shirley_eval_reflect(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
{
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
float3 N = bsdf->N;
@@ -52,60 +53,70 @@ ccl_device_forceinline Spectrum bsdf_ashikhmin_shirley_eval(ccl_private const Sh
float out = 0.0f;
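/* The pdf below is first formed in the half-vector measure, p_h(H) = (n + 1) / (2*pi) * (N.H)^n
 * in the isotropic case, and then converted to the incoming-direction measure by dividing by
 * 4*(H.I) (eq. 8 in the Ashikhmin-Shirley paper); norm * lobe / HdotI is exactly that quantity. */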
if (fmaxf(bsdf->alpha_x, bsdf->alpha_y) <= 1e-4f) {
*pdf = 0.0f;
return zero_spectrum();
}
if (NdotI > 0.0f && NdotO > 0.0f) {
NdotI = fmaxf(NdotI, 1e-6f);
NdotO = fmaxf(NdotO, 1e-6f);
float3 H = normalize(omega_in + I);
float HdotI = fmaxf(fabsf(dot(H, I)), 1e-6f);
float HdotN = fmaxf(dot(H, N), 1e-6f);
/* pump from original paper
 * (first derivative disc., but cancels the HdotI in the pdf nicely) */
float pump = 1.0f / fmaxf(1e-6f, (HdotI * fmaxf(NdotO, NdotI)));
/* pump from d-brdf paper */
/*float pump = 1.0f / fmaxf(1e-4f, ((NdotO + NdotI) * (NdotO*NdotI))); */
float n_x = bsdf_ashikhmin_shirley_roughness_to_exponent(bsdf->alpha_x);
float n_y = bsdf_ashikhmin_shirley_roughness_to_exponent(bsdf->alpha_y);
if (n_x == n_y) {
/* isotropic */
float e = n_x;
float lobe = powf(HdotN, e);
float norm = (n_x + 1.0f) / (8.0f * M_PI_F);
out = NdotO * norm * lobe * pump;
/* this is p_h / 4(H.I) (conversion from 'wh measure' to 'wi measure', eq. 8 in paper). */
*pdf = norm * lobe / HdotI;
}
else {
/* anisotropic */
float3 X, Y;
make_orthonormals_tangent(N, bsdf->T, &X, &Y);
float HdotX = dot(H, X);
float HdotY = dot(H, Y);
float lobe;
if (HdotN < 1.0f) {
float e = (n_x * HdotX * HdotX + n_y * HdotY * HdotY) / (1.0f - HdotN * HdotN);
lobe = powf(HdotN, e);
}
else {
lobe = 1.0f;
}
float norm = sqrtf((n_x + 1.0f) * (n_y + 1.0f)) / (8.0f * M_PI_F);
out = NdotO * norm * lobe * pump;
*pdf = norm * lobe / HdotI;
}
}
return make_spectrum(out);
}
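/* Ashikhmin-Shirley is a reflection-only glossy closure, so the transmission side
 * below always evaluates to zero. */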
ccl_device Spectrum bsdf_ashikhmin_shirley_eval_transmit(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
{
*pdf = 0.0f;
return zero_spectrum();
}
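/* Helper used by bsdf_ashikhmin_shirley_sample(): draws (phi, cos_theta) for the half
 * vector in the first quadrant; the caller remaps randu and mirrors phi to cover the
 * remaining quadrants of the anisotropic lobe. */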
ccl_device_inline void bsdf_ashikhmin_shirley_sample_first_quadrant(float n_x,
float n_y,
float randu,
@@ -126,93 +137,88 @@ ccl_device int bsdf_ashikhmin_shirley_sample(ccl_private const ShaderClosure *sc
float randv,
ccl_private Spectrum *eval,
ccl_private float3 *omega_in,
ccl_private float *pdf)
{
ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc;
float3 N = bsdf->N;
int label = LABEL_REFLECT | LABEL_GLOSSY;
float NdotI = dot(N, I);
if (NdotI > 0.0f) {
float n_x = bsdf_ashikhmin_shirley_roughness_to_exponent(bsdf->alpha_x);
float n_y = bsdf_ashikhmin_shirley_roughness_to_exponent(bsdf->alpha_y);
/* get x,y basis on the surface for anisotropy */
float3 X, Y;
if (n_x == n_y)
make_orthonormals(N, &X, &Y);
else
make_orthonormals_tangent(N, bsdf->T, &X, &Y);
/* sample spherical coords for h in tangent space */
float phi;
float cos_theta;
if (n_x == n_y) {
/* isotropic sampling */
phi = M_2PI_F * randu;
cos_theta = powf(randv, 1.0f / (n_x + 1.0f));
}
else {
/* anisotropic sampling */
if (randu < 0.25f) { /* first quadrant */
float remapped_randu = 4.0f * randu;
bsdf_ashikhmin_shirley_sample_first_quadrant(
n_x, n_y, remapped_randu, randv, &phi, &cos_theta);
}
else if (randu < 0.5f) { /* second quadrant */
float remapped_randu = 4.0f * (.5f - randu);
bsdf_ashikhmin_shirley_sample_first_quadrant(
n_x, n_y, remapped_randu, randv, &phi, &cos_theta);
phi = M_PI_F - phi;
}
else if (randu < 0.75f) { /* third quadrant */
float remapped_randu = 4.0f * (randu - 0.5f);
bsdf_ashikhmin_shirley_sample_first_quadrant(
n_x, n_y, remapped_randu, randv, &phi, &cos_theta);
phi = M_PI_F + phi;
}
else { /* fourth quadrant */
float remapped_randu = 4.0f * (1.0f - randu);
bsdf_ashikhmin_shirley_sample_first_quadrant(
n_x, n_y, remapped_randu, randv, &phi, &cos_theta);
phi = 2.0f * M_PI_F - phi;
}
}
/* get half vector in tangent space */
float sin_theta = sqrtf(fmaxf(0.0f, 1.0f - cos_theta * cos_theta));
float cos_phi = cosf(phi);
float sin_phi = sinf(phi); /* no sqrt(1-cos^2) here b/c it causes artifacts */
float3 h = make_float3(sin_theta * cos_phi, sin_theta * sin_phi, cos_theta);
/* half vector to world space */
float3 H = h.x * X + h.y * Y + h.z * N;
float HdotI = dot(H, I);
if (HdotI < 0.0f)
H = -H;
/* reflect I on H to get omega_in */
*omega_in = -I + (2.0f * HdotI) * H;
if (fmaxf(bsdf->alpha_x, bsdf->alpha_y) <= 1e-4f) {
/* Some high number for MIS. */
*pdf = 1e6f;
*eval = make_spectrum(1e6f);
label = LABEL_REFLECT | LABEL_SINGULAR;
}
else {
/* leave the rest to eval_reflect */
*eval = bsdf_ashikhmin_shirley_eval_reflect(sc, I, *omega_in, pdf);
}
}
return label;
}


@@ -31,10 +31,10 @@ ccl_device int bsdf_ashikhmin_velvet_setup(ccl_private VelvetBsdf *bsdf)
return SD_BSDF | SD_BSDF_HAS_EVAL;
}
ccl_device Spectrum bsdf_ashikhmin_velvet_eval(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
ccl_device Spectrum bsdf_ashikhmin_velvet_eval_reflect(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
{
ccl_private const VelvetBsdf *bsdf = (ccl_private const VelvetBsdf *)sc;
float m_invsigma2 = bsdf->invsigma2;
@@ -42,37 +42,46 @@ ccl_device Spectrum bsdf_ashikhmin_velvet_eval(ccl_private const ShaderClosure *
float cosNO = dot(N, I);
float cosNI = dot(N, omega_in);
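/* The branch below evaluates the Ashikhmin velvet distribution as written in the code:
 * D = invsigma2 * exp(-cot^2(theta_h) * invsigma2) / (pi * sin^4(theta_h)), with a crude
 * min-based shadowing term G and a uniform-hemisphere pdf of 1 / (2*pi). */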
if (cosNO > 0 && cosNI > 0) {
float3 H = normalize(omega_in + I);
float cosNH = dot(N, H);
float cosHO = fabsf(dot(I, H));
if (!(fabsf(cosNH) < 1.0f - 1e-5f && cosHO > 1e-5f)) {
*pdf = 0.0f;
return zero_spectrum();
}
float cosNHdivHO = cosNH / cosHO;
cosNHdivHO = fmaxf(cosNHdivHO, 1e-5f);
float fac1 = 2 * fabsf(cosNHdivHO * cosNO);
float fac2 = 2 * fabsf(cosNHdivHO * cosNI);
float sinNH2 = 1 - cosNH * cosNH;
float sinNH4 = sinNH2 * sinNH2;
float cotangent2 = (cosNH * cosNH) / sinNH2;
float D = expf(-cotangent2 * m_invsigma2) * m_invsigma2 * M_1_PI_F / sinNH4;
float G = fminf(1.0f, fminf(fac1, fac2)); // TODO: derive G from D analytically
float out = 0.25f * (D * G) / cosNO;
*pdf = 0.5f * M_1_PI_F;
return make_spectrum(out);
}
*pdf = 0.0f;
return zero_spectrum();
}
ccl_device Spectrum bsdf_ashikhmin_velvet_eval_transmit(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
{
*pdf = 0.0f;
return zero_spectrum();
}
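/* Sampling below is not importance sampling of the velvet lobe: omega_in is drawn
 * uniformly over the hemisphere (pdf = 1 / (2*pi)) and the distribution is simply
 * evaluated for that direction. */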
ccl_device int bsdf_ashikhmin_velvet_sample(ccl_private const ShaderClosure *sc,
@@ -92,42 +101,41 @@ ccl_device int bsdf_ashikhmin_velvet_sample(ccl_private const ShaderClosure *sc,
// distribution over the hemisphere
sample_uniform_hemisphere(N, randu, randv, omega_in, pdf);
if (dot(Ng, *omega_in) > 0) {
float3 H = normalize(*omega_in + I);
float cosNI = dot(N, *omega_in);
float cosNO = dot(N, I);
float cosNH = dot(N, H);
float cosHO = fabsf(dot(I, H));
if (fabsf(cosNO) > 1e-5f && fabsf(cosNH) < 1.0f - 1e-5f && cosHO > 1e-5f) {
float cosNHdivHO = cosNH / cosHO;
cosNHdivHO = fmaxf(cosNHdivHO, 1e-5f);
float fac1 = 2 * fabsf(cosNHdivHO * cosNO);
float fac2 = 2 * fabsf(cosNHdivHO * cosNI);
float sinNH2 = 1 - cosNH * cosNH;
float sinNH4 = sinNH2 * sinNH2;
float cotangent2 = (cosNH * cosNH) / sinNH2;
float D = expf(-cotangent2 * m_invsigma2) * m_invsigma2 * M_1_PI_F / sinNH4;
float G = fminf(1.0f, fminf(fac1, fac2)); // TODO: derive G from D analytically
float power = 0.25f * (D * G) / cosNO;
*eval = make_spectrum(power);
}
else {
*pdf = 0.0f;
*eval = zero_spectrum();
}
}
else {
*pdf = 0.0f;
*eval = zero_spectrum();
}
return LABEL_REFLECT | LABEL_DIFFUSE;
}


@@ -26,10 +26,10 @@ ccl_device int bsdf_diffuse_setup(ccl_private DiffuseBsdf *bsdf)
return SD_BSDF | SD_BSDF_HAS_EVAL;
}
ccl_device Spectrum bsdf_diffuse_eval(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
ccl_device Spectrum bsdf_diffuse_eval_reflect(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
{
ccl_private const DiffuseBsdf *bsdf = (ccl_private const DiffuseBsdf *)sc;
float3 N = bsdf->N;
@@ -39,6 +39,15 @@ ccl_device Spectrum bsdf_diffuse_eval(ccl_private const ShaderClosure *sc,
return make_spectrum(cos_pi);
}
ccl_device Spectrum bsdf_diffuse_eval_transmit(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
{
*pdf = 0.0f;
return zero_spectrum();
}
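/* Lambertian diffuse: the reflection side returns cos(theta) / pi (with a matching pdf),
 * the transmission side is zero, and sampling is cosine-weighted over the hemisphere. */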
ccl_device int bsdf_diffuse_sample(ccl_private const ShaderClosure *sc,
float3 Ng,
float3 I,
@@ -72,10 +81,19 @@ ccl_device int bsdf_translucent_setup(ccl_private DiffuseBsdf *bsdf)
return SD_BSDF | SD_BSDF_HAS_EVAL;
}
ccl_device Spectrum bsdf_translucent_eval(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
ccl_device Spectrum bsdf_translucent_eval_reflect(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
{
*pdf = 0.0f;
return zero_spectrum();
}
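/* Translucent is the mirror image of diffuse: zero on the reflection side, with the
 * Lambertian falloff evaluated against the flipped normal (-N) on the transmission side. */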
ccl_device Spectrum bsdf_translucent_eval_transmit(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
{
ccl_private const DiffuseBsdf *bsdf = (ccl_private const DiffuseBsdf *)sc;
float3 N = bsdf->N;


@@ -47,23 +47,25 @@ ccl_device void bsdf_diffuse_ramp_blur(ccl_private ShaderClosure *sc, float roug
{
}
ccl_device Spectrum bsdf_diffuse_ramp_eval(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
ccl_device Spectrum bsdf_diffuse_ramp_eval_reflect(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
{
const DiffuseRampBsdf *bsdf = (const DiffuseRampBsdf *)sc;
float3 N = bsdf->N;
float cos_pi = fmaxf(dot(N, omega_in), 0.0f);
*pdf = cos_pi * M_1_PI_F;
return rgb_to_spectrum(bsdf_diffuse_ramp_get_color(bsdf->colors, cos_pi) * M_1_PI_F);
}
ccl_device Spectrum bsdf_diffuse_ramp_eval_transmit(ccl_private const ShaderClosure *sc,
const float3 I,
const float3 omega_in,
ccl_private float *pdf)
{
return zero_spectrum();
}
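/* The ramp variant of the diffuse BSDF looks up its color from a ramp indexed by
 * cos(theta) via bsdf_diffuse_ramp_get_color() instead of using a constant albedo. */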
ccl_device int bsdf_diffuse_ramp_sample(ccl_private const ShaderClosure *sc,

Some files were not shown because too many files have changed in this diff.