// FreeWRL / FreeX3D 4.3.0
// libsound.cpp - flat-C wrapper around LabSound for X3D sound nodes
2// license: MIT or equivalent permissive
3// MS visualStudio - /MD needed in Debug_glew
4// .. because some lib-Release-only-facing .cpp like JScript_sm.cpp facing release mozjs-24.dll conflict with MDd
5#include "LabSound.h"
6#include <algorithm>
7#include <array>
8#include <chrono>
9#include <condition_variable>
10#include <cstdint>
11#include <functional>
12#include <map>
13#include <mutex>
14#include <random>
15#include <string>
16#include <thread>
17#include <vector>
18#include <list>
19#include <iostream>
20#include <string>
21
22
23// In the future, this class could do all kinds of clever things, like setting up the context,
24// handling recording functionality, etc.
25
26#include <string>
27#include <vector>
28
29using namespace lab;
30#include "LabSound/backends/AudioDevice_RtAudio.h"
31
32
33// Returns input, output
34inline std::pair<AudioStreamConfig, AudioStreamConfig> GetDefaultAudioDeviceConfiguration(const bool with_input = false)
35{
36 AudioStreamConfig inputConfig;
37 AudioStreamConfig outputConfig;
38
39 const std::vector<AudioDeviceInfo> audioDevices = lab::AudioDevice_RtAudio::MakeAudioDeviceList();
40 //const AudioDeviceIndex default_output_device = lab::GetDefaultOutputAudioDeviceIndex();
41 //const AudioDeviceIndex default_input_device = lab::GetDefaultInputAudioDeviceIndex();
42
43 AudioDeviceInfo defaultOutputInfo, defaultInputInfo;
44 for (auto& info : audioDevices)
45 {
46 if (info.is_default_output) defaultOutputInfo = info;
47 else if (info.is_default_input) defaultInputInfo = info;
48 printf("Device %d: %s\n", info.index, info.identifier.c_str());
49 printf(" input channels: %d\n", info.num_input_channels);
50 printf(" output channels: %d\n", info.num_output_channels);
51 printf(" default sample rate: %f\n", info.nominal_samplerate);
52 printf(" is default input: %s\n", info.is_default_input ? "true" : "false");
53 printf(" is default output: %s\n", info.is_default_output ? "true" : "false");
54 }
55
56 if (defaultOutputInfo.index != -1)
57 {
58 outputConfig.device_index = defaultOutputInfo.index;
59 outputConfig.desired_channels = std::min(uint32_t(2), defaultOutputInfo.num_output_channels);
60 outputConfig.desired_samplerate = defaultOutputInfo.nominal_samplerate;
61 }
62
63 if (with_input)
64 {
65 if (defaultInputInfo.index != -1)
66 {
67 inputConfig.device_index = defaultInputInfo.index;
68 inputConfig.desired_channels = std::min(uint32_t(1), defaultInputInfo.num_input_channels);
69 inputConfig.desired_samplerate = defaultInputInfo.nominal_samplerate;
70 }
71 else
72 {
73 throw std::invalid_argument("the default audio input device was requested but none were found");
74 }
75 }
76
77 return { inputConfig, outputConfig };
78}
// Reassemble argv[1..] into one string, then split it so that quoted regions
// become their own tokens (quotes preserved; backslash-escaped quotes are kept
// literally). argv[0] is re-inserted as the first element of the result.
// examples
// * abc > abc
// * abc "def" > abc, "def"
// * a "def" ghi > a, "def", ghi
// * a\"bc > a\"bc
inline std::vector<std::string> SplitCommandLine(int argc, char** argv)
{
    auto tokenize = [](const std::string& text) -> std::vector<std::string>
    {
        std::vector<std::string> pieces;

        const size_t len = text.length();
        size_t pos = 0;
        size_t tokenStart = 0;
        bool quoted = false;

        while (pos < len)
        {
            const char ch = text[pos];
            if (ch == '\\')
            {
                // skip the backslash, and a following quote stays literal
                ++pos;
                if (pos != len && text[pos] == '\"')
                    ++pos;
                continue;
            }
            if (ch == '\"')
            {
                // no empty string if not in quotes, otherwise preserve it
                if (quoted || (tokenStart != pos))
                {
                    pieces.push_back(text.substr(tokenStart - (quoted ? 1 : 0), pos - tokenStart + (quoted ? 2 : 0)));
                }
                quoted = !quoted;
                tokenStart = pos + 1;
            }
            ++pos;
        }

        // catch the case of a trailing substring that was not quoted, or a completely unquoted string
        if (pos - tokenStart > 0)
            pieces.push_back(text.substr(tokenStart, pos - tokenStart));

        return pieces;
    };

    // join the command line together so quoted strings can be found
    std::string joined;
    for (int i = 1; i < argc; ++i)
    {
        if (i > 1) joined += " ";
        joined += std::string(argv[i]);
    }

    // separate the command line, respecting quoted strings
    std::vector<std::string> tokens = tokenize(joined);
    tokens.insert(tokens.begin(), std::string{ argv[0] });
    return tokens;
}
141
142inline std::shared_ptr<AudioBus> MakeBusFromSampleFile(char const* const name, int argc, char** argv)
143{
144 std::string path_prefix;
145 auto cmds = SplitCommandLine(argc, argv);
146
147 if (cmds.size() > 1) path_prefix = cmds[1] + "/"; // cmds[0] is the path to the exe
148
149 const std::string path = path_prefix + name;
150 std::shared_ptr<AudioBus> bus = MakeBusFromFile(path, false);
151 if (!bus) throw std::runtime_error("couldn't open " + path);
152
153 return bus;
154}
155
156
157
// Block the calling thread for the requested std::chrono duration.
template <typename Duration>
void Wait(Duration interval)
{
    std::this_thread::sleep_for(interval);
}
163
// Distance attenuation models, aliased to LabSound's PannerNode constants so
// X3D field strings can be mapped via name_lookup(). DIST_NONE (0) doubles as
// the sentinel id in the table below.
enum {
    DIST_LINEAR = lab::PannerNode::LINEAR_DISTANCE,
    DIST_INVERSE = lab::PannerNode::INVERSE_DISTANCE,
    DIST_EXPONENTIAL = lab::PannerNode::EXPONENTIAL_DISTANCE,
    DIST_NONE = 0,
};
// Generic (id, string) pair used by the name_lookup() tables; a table is
// terminated by an entry whose cname is NULL.
struct key_name {
    int iname;
    const char* cname;
};
// X3D distanceModel strings -> LabSound distance-model ids.
static struct key_name distance_models[] = {
{DIST_LINEAR, "LINEAR"},
{DIST_INVERSE, "INVERSE"},
{DIST_EXPONENTIAL, "EXPONENTIAL"},
{DIST_NONE, NULL},
};
180
181
// X3D oscillator type strings -> LabSound OscillatorType ids.
// NULL cname terminates the table for name_lookup().
static struct key_name periodicWave_types[] = {
{OscillatorType::SINE, "SINE"},
{OscillatorType::SQUARE, "SQUARE"},
{OscillatorType::SAWTOOTH, "SAWTOOTH"},
{OscillatorType::TRIANGLE, "TRIANGLE"},
{OscillatorType::CUSTOM, "CUSTOM"},
{OscillatorType::OSCILLATOR_NONE, NULL},
};
190
191static struct key_name waveshaper_oversampling_types[] = {
192 {OverSampleType::NONE, "NONE"},
193 {OverSampleType::_2X, "2X"},
194 {OverSampleType::_4X, "4x"},
195};
196
197#ifdef _MSC_VER
198#define strcasecmp _stricmp
199#endif //_MSC_VER
200
201unsigned int name_lookup(char* cname, struct key_name* keynames) {
202 unsigned int i, iname;
203 struct key_name* cn;
204 i = 0;
205 iname = 0;
206 do {
207 cn = &keynames[i];
208 if (cn->cname != NULL) {
209 //if (!strcmp(cn->cname, cname)) {
210 if (!strcasecmp(cn->cname, cname)) {
211 iname = cn->iname;
212 break;
213 }
214 }
215 i++;
216 } while (cn->cname != NULL);
217 return iname;
218
219}
220
221#ifndef DEGREES_PER_RADIAN
222#define DEGREES_PER_RADIAN 57.2957795130823208768
223#endif
224#define RAD2DEGF(x) ((float)((x)*DEGREES_PER_RADIAN))
225
226//make the interface flat C
227#ifdef __cplusplus
228extern "C" {
229#endif
230#define TRUE 1
231#define FALSE 0
232
233#define GLDOUBLE double
234typedef struct {
235 void* p; /* Pointer to actual object */
236 unsigned int x; /* Extra information - reuse count etc */
238
239typedef ptw32_handle_t pthread_t;
240#include "../lib/vrml_parser/Structs.h"
241
242#include "libsound.h"
	//static void* busbuffers[30];
	//static int n_busbuffers = 0;
	// Global registry of decoded audio clips, shared across all audio contexts
	// (so X3D DEF/USE can reuse a clip). Keys are the int handles returned by
	// the libsound_createBusFrom*() functions; next_bus is the last handle issued.
	static std::map<int, std::shared_ptr<lab::AudioBus>> busses;
	static int next_bus;
247
	// Smoke test: opens the default audio device, builds a minimal graph
	// (music clip + 440 Hz sine -> gain -> destination) and plays it for 6 s,
	// blocking the calling thread. NOTE(review): 'device' is constructed but
	// never wired to 'context' via a destination node here (compare
	// libsound_createContext0) - confirm this test still produces audio.
	void libsound_testNoise()
	{
		AudioStreamConfig _inputConfig;
		AudioStreamConfig _outputConfig;
		auto config = GetDefaultAudioDeviceConfiguration(true);
		_inputConfig = config.first;
		_outputConfig = config.second;
		std::shared_ptr<lab::AudioDevice_RtAudio> device(new lab::AudioDevice_RtAudio(_inputConfig, _outputConfig));

		std::shared_ptr<lab::AudioContext> context;
		const auto defaultAudioDeviceConfigurations = GetDefaultAudioDeviceConfiguration();
		//context = lab::MakeRealtimeAudioContext(defaultAudioDeviceConfigurations.second, defaultAudioDeviceConfigurations.first);
		context = std::make_shared<lab::AudioContext>(false, true);

		lab::AudioContext& ac = *context.get();
		//auto musicClip = MakeBusFromSampleFile("samples/stereo-music-clip.wav", argc, argv);
		// NOTE(review): hard-coded developer-machine path; the clip part of the
		// test silently no-ops (early return) when the file is missing.
		const std::string path = "C:/Users/Public/dev/source5/audio/LabSound-master/assets/samples/stereo-music-clip.wav";
		std::shared_ptr<AudioBus> bus = MakeBusFromFile(path, false);
		auto musicClip = bus;
		if (!musicClip)
			return;

		std::shared_ptr<OscillatorNode> oscillator;
		std::shared_ptr<SampledAudioNode> musicClipNode;
		std::shared_ptr<GainNode> gain;

		oscillator = std::make_shared<OscillatorNode>(ac);
		gain = std::make_shared<GainNode>(ac);
		gain->gain()->setValue(0.0625f);

		// clip -> gain; setBus requires LabSound's render lock
		musicClipNode = std::make_shared<SampledAudioNode>(ac);
		{
			ContextRenderLock r(context.get(), "ex_simple");
			musicClipNode->setBus(r, musicClip);
		}
		context->connect(gain, musicClipNode, 0, 0);
		musicClipNode->start(0.0f);

		// osc -> gain -> destination
		context->connect(gain, oscillator, 0, 0);
		context->connect(context->destinationNode(), gain, 0, 0);

		oscillator->frequency()->setValue(440.f);
		oscillator->setType(OscillatorType::SINE);
		oscillator->start(0.0f);

		Wait(std::chrono::seconds(6));

	}
297
	// Wrapper holding one LabSound node (declared for completeness; the node
	// tables below store shared_ptrs directly).
	struct anstruct { std::shared_ptr<lab::AudioNode> anode; };
	// Per-context bookkeeping: the LabSound context plus int-handle lookup
	// tables for its nodes. Handles are issued from next_node (first handle 1).
	struct acstruct {
		std::shared_ptr<lab::AudioContext> context;
		bool running;   // false while suspended - see libsound_pause/resumeContext0
		int next_node;  // last node handle issued for this context
		//int next_bus;
		std::map<int, std::shared_ptr<lab::AudioNode>> nodes;  // handle -> LabSound node
		std::map<int, int> nodetype;                           // handle -> NODE_* type id
		//std::map<int, std::shared_ptr<lab::AudioBus>> busses;
		std::shared_ptr<std::vector<std::uint8_t>> bytearray; //analyser
		std::shared_ptr<std::vector<std::float_t>> floatarray; //analyser
	};
310
311 //static singleton, which is applied to all context.listeners
312 static int have_listenerpoint = 0;
313 static float listenerpoint_dir[3];
314 static float listenerpoint_up[3];
315 static float listenerpoint_pos[3];
316 void libsound_setListenerPose(float* pos, float* dir, float *up, int trackview) {
317 have_listenerpoint = trackview ? 0 : 1; //if tracking the viewpoint (default) then dont need listenerpose
318 memcpy(listenerpoint_pos, pos, 3 * sizeof(float));
319 memcpy(listenerpoint_dir, dir, 3 * sizeof(float));
320 memcpy(listenerpoint_dir, up, 3 * sizeof(float));
321 }
322
	static int next_audio_context;
	// Registry of live audio contexts keyed by the handle returned from
	// libsound_createContext0(); handles start at 1 (0 means "no context").
	static std::map<int, struct acstruct*> audio_contexts;
	// Create a LabSound context bound to the default output device, register it,
	// and pre-register its destination node as node handle 1 of that context.
	// Returns the new context handle (never 0).
	int libsound_createContext0() {
		// destination, listener, sampleRate, channel_type
		struct acstruct *ac = new acstruct();
		std::shared_ptr<lab::AudioContext> context;

		//lab::AudioContext *ccontext;
		//const auto defaultAudioDeviceConfigurations = GetDefaultAudioDeviceConfiguration(true);
		//context = std::make_shared<lab::AudioContext>(false, true);

		AudioStreamConfig _inputConfig;
		AudioStreamConfig _outputConfig;
		auto config = GetDefaultAudioDeviceConfiguration(true);
		_inputConfig = config.first;
		_outputConfig = config.second;
		std::shared_ptr<lab::AudioDevice_RtAudio> device(new lab::AudioDevice_RtAudio(_inputConfig, _outputConfig));
		context = std::make_shared<lab::AudioContext>(false, true);
		// wire device <-> context through a destination node
		auto destinationNode = std::make_shared<lab::AudioDestinationNode>(*context.get(), device);
		device->setDestinationNode(destinationNode);
		context->setDestinationNode(destinationNode);


		if (1) {
			auto listener = context->listener();
			// I believe these are the defaults, and we keep our avatar at 0 and move sound sources relative to avatar
			//listener->forwardX()->setValue(0.0f);
			//listener->forwardY()->setValue(0.0f);
			//listener->forwardZ()->setValue(-1.0f);
			//listener->upX()->setValue(0.0f);
			//listener->upY()->setValue(1.0f);
			//listener->upZ()->setValue(0.0f);
			listener->setForward({ 0.0,0.0,-1.0 });
			listener->setUpVector({ 0.0,1.0,0.0 });
			listener->setPosition({ 0.0,0.0,0.0 });

			//listener->positionX()->setValue(0.0f);
			//listener->positionY()->setValue(0.0f);
			//listener->positionZ()->setValue(0.0f);
			//doppler is deprecated in web audio (web browsers)
			//listener->dopplerFactor()->setValue(1.0f);
		}
		ac->context = context; // libsound_createContext(); // static_cast<lab::AudioContext*>(libsound_createContext());
		// ac->running = true; //for pause / resume
		next_audio_context++;
		audio_contexts[next_audio_context] = ac;

		ac->next_node++;
		ac->nodes[ac->next_node] = ac->context->destinationNode(); //the output device will be the parent to other source and processing nodes
		ac->nodetype[ac->next_node] = NODE_AudioDestination;
		return next_audio_context;
	}
	// NODE_* type id -> short label used by libsound_print_connections().
	// Terminated by the {0, NULL} sentinel.
	static struct type_name {
		int iname;
		const char* cname;
	} type_names[] = {
	{NODE_AudioDestination, "AD"},
	{NODE_Analyser, "Anly"},
	{NODE_Sound, "Snd"},
	{NODE_SpatialSound, "SS"},
	{NODE_AudioClip, "AC"},
	{NODE_Gain, "Gain"},
	{NODE_Convolver, "Conv"},
	{NODE_WaveShaper, "WShp"},
	{NODE_BiquadFilter, "BiQ"},
	{NODE_DynamicsCompressor, "DCmp"},
	{NODE_ChannelSplitter, "Splt"},
	{NODE_ChannelMerger, "Merg"},
	{NODE_Delay, "Dlay"},
	{NODE_BufferAudioSource, "BAS"},
	{NODE_AudioBuffer, "ABuf"},
	{NODE_OscillatorSource, "Osc"},
	{NODE_ListenerPoint, "LP"},
	{NODE_ListenerPointSource, "LPS"},
	{NODE_StreamAudioDestination, "SAD"},
	{NODE_StreamAudioSource, "SAS"},
	{NODE_MicrophoneSource, "MicS"},
	{0,NULL},
	};
403 static const char* nodetype_lookup(int itype) {
404 int i;
405 const char *cname;
406 struct type_name* tn;
407 i = 0;
408 cname = "";
409 do {
410 tn = &type_names[i];
411 if (tn->iname == itype) {
412 cname = tn->cname;
413 break;
414 }
415 i++;
416 } while (tn->iname != 0);
417 return cname;
418
419 }
	//this one uses int index lookup in a map, much like a vector except can deleted elements
	// Bookkeeping record for one LabSound connect() call, so connections can be
	// listed (libsound_print_connections) and matched again on disconnect.
	struct connection {
		int icontext;      // audio context handle
		int iparent;       // destination node handle
		int iparent_type;  // destination NODE_* type id
		int ichild;        // source node handle
		int ichild_type;   // source NODE_* type id
		int srcindex;      // source output index
		int dstindex;      // destination input index
	};

	// All live connections, across every context.
	static std::list<connection> connections;
432 void libsound_connect0(int icontext, int idestination, int isource) {
433 struct acstruct* ac = audio_contexts[icontext];
434 std::shared_ptr<AudioNode> destination = ac->nodes[idestination];
435 std::shared_ptr<AudioNode> source = ac->nodes[isource];
436 ac->context->connect(destination, source);
437 int iparent_type = ac->nodetype[idestination];
438 int ichild_type = ac->nodetype[isource];
439 struct connection cc; cc.icontext = icontext; cc.iparent = idestination; cc.iparent_type = iparent_type;
440 cc.ichild = isource; cc.ichild_type = ichild_type; cc.srcindex = 0; cc.dstindex = 0;
441 connections.push_back(cc);
442 }
443 void libsound_connect1(int icontext, int idestination, int isource, int indexSrc) {
444 struct acstruct* ac = audio_contexts[icontext];
445 std::shared_ptr<AudioNode> destination = ac->nodes[idestination];
446 std::shared_ptr<AudioNode> source = ac->nodes[isource];
447 ac->context->connect(destination, source,0,indexSrc);
448 int iparent_type = ac->nodetype[idestination];
449 int ichild_type = ac->nodetype[isource];
450 struct connection cc; cc.icontext = icontext; cc.iparent = idestination; cc.iparent_type = iparent_type;
451 cc.ichild = isource; cc.ichild_type = ichild_type; cc.srcindex = indexSrc; cc.dstindex = 0;
452 connections.push_back(cc);
453
454 }
	// Connect source output indexSrc to destination input indexDst after a
	// range sanity check, and record the connection.
	// NOTE(review): the checks use '>' - if the indices are 0-based this lets
	// index == count through; confirm against LabSound's connect() contract.
	void libsound_connect2(int icontext, int idestination, int isource, int indexDst, int indexSrc) {
		struct acstruct* ac = audio_contexts[icontext];
		std::shared_ptr<AudioNode> destination = ac->nodes[idestination];
		std::shared_ptr<AudioNode> source = ac->nodes[isource];
		int dstInputs = destination->numberOfInputs();
		int srcOutputs = source->numberOfOutputs();
		if (indexDst > dstInputs) {
			// diagnostic output (extra newlines make it stand out in the console log)
			printf("destination number of inputs %d destination idx %d\n", destination->numberOfInputs(), indexDst);
			printf("\n");
			printf("\n");
			printf("\n");
			printf("\n");
			return;
		}
		if (indexSrc > srcOutputs) {
			printf("source number of outputs %d source idx %d\n", srcOutputs, indexSrc);
			printf("\n");
			printf("\n");
			printf("\n");
			printf("\n");
			return;
		}

		ac->context->connect(destination, source, indexDst, indexSrc);
		// record the connection for print/disconnect bookkeeping
		int iparent_type = ac->nodetype[idestination];
		int ichild_type = ac->nodetype[isource];
		struct connection cc; cc.icontext = icontext; cc.iparent = idestination; cc.iparent_type = iparent_type;
		cc.ichild = isource; cc.ichild_type = ichild_type; cc.srcindex = indexSrc; cc.dstindex = indexDst;
		connections.push_back(cc);

	}
486 void libsound_disconnect2(int icontext, int idestination, int isource, int indexDst, int indexSrc) {
487 struct acstruct* ac = audio_contexts[icontext];
488 std::shared_ptr<AudioNode> destination = ac->nodes[idestination];
489 std::shared_ptr<AudioNode> source = ac->nodes[isource];
490 int dstInputs = destination->numberOfInputs();
491 int srcOutputs = source->numberOfOutputs();
492 if (indexDst > dstInputs) {
493 printf("destination number of inputs %d destination idx %d\n", destination->numberOfInputs(), indexDst);
494 printf("\n");
495 printf("\n");
496 printf("\n");
497 printf("\n");
498 return;
499 }
500 if (indexSrc > srcOutputs) {
501 printf("source number of outputs %d source idx %d\n", srcOutputs, indexSrc);
502 printf("\n");
503 printf("\n");
504 printf("\n");
505 printf("\n");
506 return;
507 }
508
509 ac->context->disconnect(destination, source, indexDst, indexSrc);
510 //find and remove from vector
511 // vec.erase(vec.begin() + index);
512 int iparent_type = ac->nodetype[idestination];
513 int ichild_type = ac->nodetype[isource];
514 struct connection cc; cc.icontext = icontext; cc.iparent = idestination; cc.iparent_type = iparent_type;
515 cc.ichild = isource; cc.ichild_type = ichild_type; cc.srcindex = indexSrc; cc.dstindex = indexDst;
516
517 std::list<connection>::iterator it;
518 for (it = connections.begin(); it != connections.end(); ++it){
519 connection cn = *it;
520 if (cn.icontext = cc.icontext && cn.iparent == cc.iparent && cn.ichild == cc.ichild
521 && cn.srcindex == cc.srcindex && cn.dstindex == cc.dstindex) {
522 connections.erase(it);
523 break;
524 }
525 }
526
527 }
528
	// Connect using an icset descriptor (project struct; presumably p=parent
	// handle, n=node handle, d=dest input index, s=src output index - TODO confirm).
	void libsound_connect(int icontext, icset iparent) {
		if (iparent.p)
			libsound_connect2(icontext, iparent.p, iparent.n, iparent.d, iparent.s);
	}
	// Disconnect using the icset's ld/ls index fields (presumably the "last
	// connected" indices). NOTE(review): unlike libsound_connect() there is no
	// iparent.p guard here - confirm the asymmetry is intentional.
	void libsound_disconnect(int icontext, icset iparent) {
		libsound_disconnect2(icontext, iparent.p, iparent.n, iparent.ld, iparent.ls);

	}
537 void libsound_print_connections() {
538 printf("\n");
539 printf("%2s %7s %4s %7s %7s %6s %4s\n","ic","iparent", "type", "dstIndx", "srcIndex", "ichild", "type");
540 //for (int i = 0; i < connections.size(); i++) {
541 // struct connection cc = connections[i];
542 std::list<connection>::iterator it;
543 for (it = connections.begin(); it != connections.end(); ++it) {
544 connection cc = *it;
545
546 const char* ptype = nodetype_lookup(cc.iparent_type);
547 const char* ctype = nodetype_lookup(cc.ichild_type);
548
549 printf("%2d %7d %4s %7d %7d %6d %4s\n", cc.icontext, cc.iparent, ptype, cc.dstindex, cc.srcindex, cc.ichild, ctype);
550 }
551 printf("count %d\n", (int)connections.size());
552 }
553 void libsound_pauseContext0(int icontext) {
554 struct acstruct* ac = audio_contexts[icontext];
555 if (ac->running) {
556 ac->context->suspend(); //"any queued samples will (still) play" maybe not the right way to turn off.
557 ac->running = false;
558 }
559 }
560 void libsound_resumeContext0(int icontext) {
561 struct acstruct* ac = audio_contexts[icontext];
562 if (!ac->running) {
563 ac->context->resume(); //"any queued samples will (still) play" maybe not the right way to turn off.
564 ac->running = true;
565 }
566 }
	// Attempt to mute one X3D Sound node by silencing its PannerNode outputs.
	// The author's note "this didn't work" is retained; kept for reference.
	void libsound_pauseNode0(struct X3D_Node* node) {
		//this didn't work
		struct X3D_SoundRep* srepn = getSoundRep(X3D_NODE(node));
		int icontext = srepn->icontext;
		if(icontext){
			struct acstruct* ac = audio_contexts[icontext];
			//AudioContext& context = *ac->context.get();
			switch (node->_nodeType) {
			case NODE_Sound:
			{
				struct X3D_Sound* pnode = (struct X3D_Sound*)node;
				std::shared_ptr<PannerNode> pannerNode;
				PannerNode* pannerNode_ptr;
				if (srepn->inode) {
					// srepn->inode is the handle of the PannerNode created in libsound_updateNode3
					pannerNode_ptr = static_cast<PannerNode*>(ac->nodes[srepn->inode].get());
					{
						// LabSound requires the render lock to touch node state
						ContextRenderLock r(ac->context.get(), "ex_simple");
						pannerNode_ptr->silenceOutputs(r);
					}
				}
			}
			break;
			default:
				break;
			}
		}
	}
	// Counterpart to libsound_pauseNode0: un-silence the node's PannerNode
	// outputs. The author's note "this didn't work" is retained.
	void libsound_resumeNode0(struct X3D_Node* node) {
		// this didn't work
		struct X3D_SoundRep* srepn = getSoundRep(X3D_NODE(node));
		int icontext = srepn->icontext;
		if (icontext) {
			struct acstruct* ac = audio_contexts[icontext];
			//AudioContext& context = *ac->context.get();
			switch (node->_nodeType) {
			case NODE_Sound:
			{
				struct X3D_Sound* pnode = (struct X3D_Sound*)node;
				std::shared_ptr<PannerNode> pannerNode;
				PannerNode* pannerNode_ptr;
				if (srepn->inode) {
					pannerNode_ptr = static_cast<PannerNode*>(ac->nodes[srepn->inode].get());
					{
						// LabSound requires the render lock to touch node state
						ContextRenderLock r(ac->context.get(), "ex_simple");
						pannerNode_ptr->unsilenceOutputs(r);
					}
				}
			}
			break;
			default:
				break;
			}
		}
	}
621
622 int libsound_createBusFromBuffer0(char* bbuffer, int len) {
623 //static list of busses, independent of audio context, so can DEF/USE?
624 std::shared_ptr<AudioBus> Bus;
625 if (0) {
626 FILE * fp = fopen("tmp_buf_wav", "w");
627 int nchunks = len / 1024;
628 int leftover = len % 1024;
629 for(int i=0;i<nchunks;i++)
630 fwrite(&bbuffer[i*1024], 1024, 1, fp);
631 fwrite(&bbuffer[nchunks * 1024], leftover, 1, fp);
632 fclose(fp);
633 // x this doesn't work. I get junk temp file, and Bus null/empty.
634 Bus = MakeBusFromFile("tmp_buf_wav", false);
635 remove("tmp_buf.wav");
636 }
637 else {
638 std::vector<uint8_t> buffer(bbuffer, bbuffer + len); // , (uint8_t)bbuffer);
639 Bus = MakeBusFromMemory(buffer, false);
640 printf(".");
641 }
642 next_bus++;
643 busses[next_bus] = Bus;
644 return next_bus;
645 }
	// Build an AudioBus from float32 PCM that is already planar
	// (channel-contiguous): buffer holds nchannel runs of lentotal/nchannel
	// samples each. Registers the bus and returns its handle.
	// NOTE(review): the sample rate is hard-coded to 44100 - confirm callers
	// always supply 44.1 kHz data, or plumb the real rate through.
	int libsound_createBusFromPCM32(float* buffer, int nchannel, int lentotal) {
		//static list of busses, independent of audio context, so can DEF/USE?
		int length = lentotal / nchannel;
		std::shared_ptr<lab::AudioBus> audioBus(new lab::AudioBus(nchannel, length));
		audioBus->setSampleRate(44100.0);

		//audioBus->setSampleRate((float)audioData->sampleRate);
		for (int i = 0; i < nchannel; ++i)
		{
			std::memcpy(audioBus->channel(i)->mutableData(), buffer + (i * length), length * sizeof(float));
		}

		next_bus++;
		busses[next_bus] = audioBus;
		return next_bus;

	}
663
//>>>> libnyquist Common.cpp Common.h
// - I just need the ConvertToFloat32() but don't know how to get it directly from libnyquist
	// Signed maxes, defined for readabilty/convenience
#define NQR_INT16_MAX 32767.f
#define NQR_INT24_MAX 8388608.f
#define NQR_INT32_MAX 2147483648.f

	// Scale factor mapping a signed byte to roughly [-1, 1].
	static const float NQR_BYTE_2_FLT = 1.0f / 127.0f;

	// Normalize one integer sample of the given width to float32.
#define int8_to_float32(s) ((float) (s) * NQR_BYTE_2_FLT)
#define uint8_to_float32(s)(((float) (s) - 128) * NQR_BYTE_2_FLT)
#define int16_to_float32(s) ((float) (s) / NQR_INT16_MAX)
#define int24_to_float32(s) ((float) (s) / NQR_INT24_MAX)
#define int32_to_float32(s) ((float) (s) / NQR_INT32_MAX)
	// Source PCM layouts accepted by ConvertToFloat32(); PCM_END is a guard.
	enum PCMFormat
	{
		PCM_U8,
		PCM_S8,
		PCM_16,
		PCM_24,
		PCM_32,
		PCM_64,
		PCM_FLT,
		PCM_DBL,
		PCM_END
	};
	//freewrl assume little endian
	// Byte-swap helpers are identity ops because freewrl assumes little endian.
#define Read16(n) (n)
#define Read24(n) (n)
#define Read32(n) (n)
#define Read64(n) (n)
// http://www.dsprelated.com/showthread/comp.dsp/136689-1.php
	// Assemble a little-endian signed 24-bit sample from three bytes
	// (a = LSB, c = MSB) and sign-extend it to 32 bits.
	inline int32_t Pack(uint8_t a, uint8_t b, uint8_t c)
	{
		uint32_t value = (uint32_t(c) << 16) | (uint32_t(b) << 8) | uint32_t(a);
		if (value & 0x800000u)
			value |= 0xff000000u; // sign-extend bit 23 into the top byte

		return (int32_t)value;
	}
704 void ConvertToFloat32(float* dst, const uint8_t* src, const size_t N, PCMFormat f)
705 {
706 assert(f != PCM_END);
707
708 if (f == PCM_U8)
709 {
710 const uint8_t* dataPtr = reinterpret_cast<const uint8_t*>(src);
711 for (size_t i = 0; i < N; ++i)
712 dst[i] = uint8_to_float32(dataPtr[i]);
713 }
714 else if (f == PCM_S8)
715 {
716 const int8_t* dataPtr = reinterpret_cast<const int8_t*>(src);
717 for (size_t i = 0; i < N; ++i)
718 dst[i] = int8_to_float32(dataPtr[i]);
719 }
720 else if (f == PCM_16)
721 {
722 const int16_t* dataPtr = reinterpret_cast<const int16_t*>(src);
723 for (size_t i = 0; i < N; ++i)
724 dst[i] = int16_to_float32(Read16(dataPtr[i]));
725 }
726 else if (f == PCM_24)
727 {
728 const uint8_t* dataPtr = reinterpret_cast<const uint8_t*>(src);
729 size_t c = 0;
730 for (size_t i = 0; i < N; ++i)
731 {
732 int32_t sample = Pack(dataPtr[c], dataPtr[c + 1], dataPtr[c + 2]);
733 dst[i] = int24_to_float32(sample); // Packed types don't need addtional endian helpers
734 c += 3;
735 }
736 }
737 else if (f == PCM_32)
738 {
739 const int32_t* dataPtr = reinterpret_cast<const int32_t*>(src);
740 for (size_t i = 0; i < N; ++i)
741 dst[i] = int32_to_float32(Read32(dataPtr[i]));
742 }
743
744 //@todo add int64 format
745
746 else if (f == PCM_FLT)
747 {
748 std::memcpy(dst, src, N * sizeof(float));
749 /* const float * dataPtr = reinterpret_cast<const float *>(src);
750 for (size_t i = 0; i < N; ++i)
751 dst[i] = (float) Read32(dataPtr[i]); */
752 }
753 else if (f == PCM_DBL)
754 {
755 const double* dataPtr = reinterpret_cast<const double*>(src);
756 for (size_t i = 0; i < N; ++i)
757 dst[i] = (float)Read64(dataPtr[i]);
758 }
759 }
760//<<< libnyquist Common.cpp Common.h
761 void deinterleave(char* dst, char* src, int nchannel, int bits, int lenbytes) {
762 int kbyte = bits / 8;
763 int chunks = lenbytes / nchannel / kbyte;
764
765 for (int i = 0; i < chunks; i++) {
766 for (int j = 0; j < nchannel; j++)
767 for (int k = 0; k < kbyte; k++)
768 dst[(j * chunks)*kbyte + i*kbyte + k] = src[(i * nchannel + j)*kbyte + k];
769 }
770 }
771 int libsound_createBusFromPCM(char* buffer, int bits, int nchannel, int lentotal, int freq) {
772 //called from MPEG_Utils which extracts PCM data from the mpeg
773 int length = lentotal / nchannel;
774
775 //audioBus->setSampleRate((float)audioData->sampleRate);
776 int bytes32 = length * 32 / bits;
777 int chunks = length * 8 / bits;
778 std::shared_ptr<lab::AudioBus> audioBus(new lab::AudioBus(nchannel, bytes32));
779 audioBus->setSampleRate((float)freq);
780
781 float *chan32 = (float*)std::malloc(bytes32);
782 PCMFormat f = bits == 8 ? PCMFormat::PCM_S8 : bits == 16 ? PCMFormat::PCM_16 : bits == 24 ? PCMFormat::PCM_24 : bits == 32 ? PCMFormat::PCM_32 : PCMFormat::PCM_64;
783 char *buffer2 = (char*)std::malloc(lentotal);
784 deinterleave(buffer2,buffer, nchannel, bits, lentotal);
785 for (int i = 0; i < nchannel; ++i)
786 {
787 char* channel = &buffer2[i * length];
788 ConvertToFloat32(chan32, (uint8_t*)channel, chunks, f);
789 std::memcpy(audioBus->channel(i)->mutableData(), chan32, bytes32);
790 }
791 std::free(buffer2);
792 std::free(chan32);
793 next_bus++;
794 busses[next_bus] = audioBus;
795 return next_bus;
796
797 }
798
799
800 int libsound_createBusFromFile0(char* url) {
801 //static list of busses, independent of audio context, so can DEF/USE?
802 std::shared_ptr<AudioBus> Bus;
803 Bus = MakeBusFromFile(url, false);
804 next_bus++;
805 busses[next_bus] = Bus;
806 return next_bus;
807 }
808 double libsound_computeDuration0(int ibuffer) {
809 AudioBus *bus = static_cast<AudioBus*>(busses[ibuffer].get());
810 double duration = bus->length()* bus->sampleRate();
811 return duration;
812 }
813 void getChannelInterpretation(char *interpretation, char *mode, ChannelInterpretation *interp, ChannelCountMode *cmode) {
814 *interp = lab::ChannelInterpretation::Speakers;
815 if (!_stricmp(interpretation, "DISCRETE"))
816 *interp = lab::ChannelInterpretation::Discrete;
817 // ["max", "clamped-max", "explicit"]
818 *cmode = lab::ChannelCountMode::Max;
819 if (!_stricmp(mode, "CLAMPED-MAX")) *cmode = lab::ChannelCountMode::ClampedMax;
820 else if (!_stricmp(mode, "EXPLICIT")) *cmode = lab::ChannelCountMode::Explicit;
821
822 }
823 struct X3D_SoundRep* getSoundRep(struct X3D_Node* pnode) {
824 //main benefit of _intern Rep structure: saves switch-casing on _NodeType
825 // to get specific common fields used for internal processing only
826 // -- just put the common fields in the Rep
827 // in our case it would be our lookup table int, maybe some AudioNode fields related to connecting, starting, stopping
828 struct X3D_SoundRep* srep = NULL;
829 if (pnode) {
830 srep = (struct X3D_SoundRep*)pnode->_intern;
831 if (!srep) {
832 srep = (struct X3D_SoundRep*)malloc(sizeof(struct X3D_SoundRep));
833 memset(srep, 0, sizeof(struct X3D_SoundRep));
834 srep->itype = 7; //SoundRep
835 pnode->_intern = (struct X3D_GeomRep*)srep;
836 }
837 }
838 return srep;
839 }
840 static int nondefault_channelinterp = 0;
841 void libsound_updateNode3(int icontext, icset iparent, struct X3D_Node* node) {
842 struct acstruct* ac = audio_contexts[icontext];
843 AudioContext& context = *ac->context.get();
844 //lab::AudioContext& ac = *context.get();
845 //goal- switch-case on x3d nodeType and do any labsound node create+connect, update input or update output
846 struct X3D_SoundRep* srepn = getSoundRep(node);
847 switch (node->_nodeType) {
848 case NODE_Sound:
849 {
850 struct X3D_Sound* pnode = (struct X3D_Sound*)node;
851 std::shared_ptr<PannerNode> pannerNode;
852 PannerNode* pannerNode_ptr;
853 GainNode* gain_ptr;
854 if (!srepn->inode) {
855 //gain node on output
856 std::shared_ptr<GainNode> gain;
857 //create labsound node
858 gain = std::make_shared<GainNode>(context);
859 gain_ptr = gain.get();
860 ac->next_node++;
861 ac->nodes[ac->next_node] = gain;
862 ac->nodetype[ac->next_node] = NODE_Gain;
863 srepn->igain = ac->next_node;
864 //connect gain output to parent node input
865 if (iparent.p)
866 libsound_connect2(icontext, iparent.p, srepn->igain, iparent.d, iparent.s);
867
868
869 if (pnode->spatialize != TRUE) {
870 //I don't know how to turn off spatialization
871 //EQUALPOWER doesn't do it
872 //so I will ignore
873 }
874
875 pannerNode = std::make_shared<PannerNode>(context);
876 pannerNode->setPanningModel(PanningModel::EQUALPOWER); //PanningModel:: in later LabSound releases
877 //pannerNode->setPanningModel(PanningModel::HRTF);
878 ac->next_node++;
879 ac->nodes[ac->next_node] = pannerNode;
880 ac->nodetype[ac->next_node] = NODE_Sound;
881 srepn->inode = ac->next_node;
882 srepn->icontext = icontext;
883 // connect Sound output to gain input
884 libsound_connect0(icontext, srepn->igain, srepn->inode);
885
886 pannerNode_ptr = static_cast<PannerNode*>(ac->nodes[srepn->inode].get());
887 //we don't have the inner/outer ellipsoid so we emulate with inner/outer sphere
888 pannerNode_ptr->setConeInnerAngle( 90.0f);
889 pannerNode_ptr->setConeOuterAngle(135.0f);
890 pannerNode_ptr->setConeOuterGain(.07f);
891 // LabSound bug - it can't switch directly to linear
892 // because it erroneously thinks its on LINEAR but its on INVERSE
893 // so we to exponential first, then linear to get to linear
894 pannerNode_ptr->setDistanceModel(lab::PannerNode::EXPONENTIAL_DISTANCE);
895 pannerNode_ptr->setDistanceModel(lab::PannerNode::LINEAR_DISTANCE);
896 //pannerNode_ptr->setDistanceModel(lab::PannerNode::INVERSE_DISTANCE);
897 //pannerNode_ptr->distanceGain()->setValue(0.1f);
898 pannerNode_ptr->setRolloffFactor(1.0f);
899 pannerNode_ptr->setRefDistance(pnode->minFront);
900 pannerNode_ptr->setMaxDistance(pnode->maxFront);
901
902 }
903 pannerNode_ptr = static_cast<PannerNode*>(ac->nodes[srepn->inode].get());
904 if (have_listenerpoint) {
905 auto listener = context.listener();
906 // I believe these are the defaults, and we keep our avatar at 0 and move sound sources relative to avatar
907 //listener->forwardX()->setValue(0.0f);
908 //listener->forwardY()->setValue(0.0f);
909 //listener->forwardZ()->setValue(-1.0f);
910 //listener->upX()->setValue(0.0f);
911 //listener->upY()->setValue(1.0f);
912 //listener->upZ()->setValue(0.0f);
913 float* pos, * dir, * up;
914 pos = listenerpoint_pos;
915 dir = listenerpoint_dir;
916 up = listenerpoint_up;
917 listener->setForward({ dir[0],dir[1],dir[2]});
918 listener->setUpVector({ up[0], up[1], up[2]});
919 listener->setPosition({ pos[0],pos[1], pos[2]});
920
921 }
922 else {
923 auto listener = context.listener();
924 //could be an enable / disable route to LP during scene, need to reset listener to default
925 listener->setForward({ 0.0,0.0,-1.0 });
926 listener->setUpVector({ 0.0,1.0,0.0 });
927 listener->setPosition({ 0.0,0.0,0.0 });
928
929 }
930 gain_ptr = static_cast<GainNode*>(ac->nodes[srepn->igain].get());
931 //std::cout << "[cg= " << pannerNode_ptr->coneGain()->value() << "]" << std::endl;
932 gain_ptr->gain()->setValue(pnode->intensity);
933 float *xyz = pnode->__lastlocation.c;
934 pannerNode_ptr->setPosition(xyz[0], xyz[1], xyz[2]);
935 //pannerNode_ptr->positionX()->setValue(pnode->__lastlocation.c[0]);
936 //pannerNode_ptr->positionY()->setValue(pnode->__lastlocation.c[1]);
937 //pannerNode_ptr->positionZ()->setValue(pnode->__lastlocation.c[2]);
938 float* rxyz = pnode->__lastdirection.c;
939 //std::cout << " rxyz " << rxyz[0] << " " << rxyz[1] << " " << rxyz[2] << std::endl;
940 pannerNode_ptr->setOrientation({ rxyz[0], rxyz[1], rxyz[2] });
941 //pannerNode_ptr->orientationX()->setValue(pnode->__lastdirection.c[0]);
942 //pannerNode_ptr->orientationY()->setValue(pnode->__lastdirection.c[1]);
943 //pannerNode_ptr->orientationZ()->setValue(pnode->__lastdirection.c[2]); //Q. should it be -ve
944 }
945 break;
946
947 case NODE_SpatialSound:
948 {
949 struct X3D_SpatialSound* pnode = (struct X3D_SpatialSound*)node;
950 std::shared_ptr<PannerNode> pannerNode;
951 PannerNode* pannerNode_ptr;
952 GainNode* gain_ptr;
953 if (!srepn->inode) {
954 //gain node on output
955 std::shared_ptr<GainNode> gain;
956 //create labsound node
957 gain = std::make_shared<GainNode>(context);
958 gain_ptr = gain.get();
959 ac->next_node++;
960 ac->nodes[ac->next_node] = gain;
961 ac->nodetype[ac->next_node] = NODE_Gain;
962 srepn->igain = ac->next_node;
963 //connect gain output to parent node input
964 if (iparent.p)
965 libsound_connect2(icontext, iparent.p, srepn->igain, iparent.d, iparent.s);
966
967 if (pnode->spatialize != TRUE) {
968 //I don't know how to turn off spatialization
969 //EQUALPOWER doesn't do it
970 //so I will ignore
971 }
972 bool loaded = true;
973 if (!context.loadHrtfDatabase("hrtf")) {
974 std::string path = std::string("../../../../lib_windows_vc12/LabSound/share") + "/hrtf";
975 if (!context.loadHrtfDatabase(path)) {
976 printf("Could not load spatialization database");
977 loaded = false;
978 }
979 }
980
981 pannerNode = std::make_shared<PannerNode>(context);
982
983 if (pnode->enableHRTF == TRUE && loaded) {
984 pannerNode->setPanningModel(lab::PanningModel::HRTF);
985 printf("SpatialSound HRTF enabled\n");
986 }
987 else {
988 pannerNode->setPanningModel(lab::PanningModel::EQUALPOWER);
989 }
990
991 //pannerNode->setPanningModel(PanningModel::EQUALPOWER); //HRTF); //EQUALPOWER); //
992 pannerNode->setDistanceModel(lab::PannerNode::EXPONENTIAL_DISTANCE);
993 unsigned int distance_enum = name_lookup(pnode->distanceModel->strptr, distance_models);
994 switch (distance_enum) {
995 case lab::PannerNode::LINEAR_DISTANCE:
996 pannerNode->setDistanceModel(lab::PannerNode::LINEAR_DISTANCE); break;
997 case lab::PannerNode::INVERSE_DISTANCE:
998 pannerNode->setDistanceModel(lab::PannerNode::INVERSE_DISTANCE); break;
999 case lab::PannerNode::EXPONENTIAL_DISTANCE:
1000 pannerNode->setDistanceModel(lab::PannerNode::EXPONENTIAL_DISTANCE); break;
1001 default:
1002 break;
1003 }
1004
1005 ac->next_node++;
1006 ac->nodes[ac->next_node] = pannerNode;
1007 ac->nodetype[ac->next_node] = NODE_SpatialSound;
1008 srepn->inode = ac->next_node;
1009 srepn->icontext = icontext;
1010 // connect Sound output to gain input
1011 libsound_connect0(icontext, srepn->igain, srepn->inode);
1012 }
1013 pannerNode_ptr = static_cast<PannerNode*>(ac->nodes[srepn->inode].get());
1014 if (have_listenerpoint) {
1015 auto listener = context.listener();
1016 // I believe these are the defaults, and we keep our avatar at 0 and move sound sources relative to avatar
1017 //listener->forwardX()->setValue(0.0f);
1018 //listener->forwardY()->setValue(0.0f);
1019 //listener->forwardZ()->setValue(-1.0f);
1020 //listener->upX()->setValue(0.0f);
1021 //listener->upY()->setValue(1.0f);
1022 //listener->upZ()->setValue(0.0f);
1023 float* pos, * dir, * up;
1024 pos = listenerpoint_pos;
1025 dir = listenerpoint_dir;
1026 up = listenerpoint_up;
1027 listener->setForward({ dir[0],dir[1],dir[2] });
1028 listener->setUpVector({ up[0], up[1], up[2] });
1029 listener->setPosition({ pos[0],pos[1], pos[2] });
1030
1031 }
1032 else {
1033 auto listener = context.listener();
1034 //could be an enable / disable route to LP during scene, need to reset listener to default
1035 listener->setForward({ 0.0,0.0,-1.0 });
1036 listener->setUpVector({ 0.0,1.0,0.0 });
1037 listener->setPosition({ 0.0,0.0,0.0 });
1038
1039 }
1040
1041 gain_ptr = static_cast<GainNode*>(ac->nodes[srepn->igain].get());
1042 gain_ptr->gain()->setValue(pnode->intensity* pnode->gain);
1043 pannerNode_ptr->setConeInnerAngle(RAD2DEGF(pnode->coneInnerAngle));
1044 pannerNode_ptr->setConeOuterAngle(RAD2DEGF(pnode->coneOuterAngle));
1045 pannerNode_ptr->setConeOuterGain(pnode->coneOuterGain);
1046 // something you would query, not set: pannerNode_ptr->distanceGain()->setValue(0.1f);
1047 pannerNode_ptr->setRolloffFactor(pnode->rolloffFactor);
1048 pannerNode_ptr->setRefDistance(pnode->referenceDistance);
1049 pannerNode_ptr->setMaxDistance(pnode->maxDistance);
1050
1051 if (pnode->dopplerEnabled == TRUE) {
1052 float* v = pnode->__velocity.c;
1053 pannerNode_ptr->setVelocity(v[0], v[1], v[2]);
1054 //std::cout << " vel= " << v[0] << " " << v[1] << " " << v[2] << std::endl;
1055 auto listener = context.listener();
1056 listener->setDopplerFactor(1.0f); //default is 1
1057 listener->setSpeedOfSound(343.0f); //default is 343 m/s
1058 {
1059 ContextRenderLock r(&context, "ex_simple");
1060 float dr = pannerNode_ptr->dopplerRate(r);
1061 //std::cout << " doppRate " << dr << std::endl;
1062 pnode->__dopplerFactor = dr;
1063 }
1064 }
1065
1066 //std::cout << "[cg= " << pannerNode_ptr->coneGain()->value() << "]" << std::endl;
1067 // done above gain_ptr->gain()->setValue(pnode->intensity * pnode->gain);
1068
1069 float* dir0 = pnode->__lastdirection.c;
1070 float dir[3];
1071 memcpy(dir, dir0,3*sizeof(float));
1072 // dir[0] = fabs(dir[0]) < .001f ? 0.0f : dir[0];
1073 //deep mystery: Spatial.x3d when navigating in Fly: sound will cut out
1074 // but doesn't cut out if dir.y is either 0 or .001 or bigger.
1075 // same { dir, xyz } in LabSound Examples.hpp doesn't have a problem
1076 // bizarre because dir/orientation shouldn't be needed when inner and outer cone angles are both 2 PI
1077 dir[1] = fabs(dir[1]) < .001f ? copysign(.001f,dir[1]) : dir[1];
1078 //std::cout << " { " << dir[0] << ", " << dir[1] << ", " << dir[2] << ", ";
1079
1080 pannerNode_ptr->setOrientation({ dir[0], dir[1] , dir[2] });
1081 //pannerNode_ptr->orientationX()->setValue(dir[0]);
1082 //pannerNode_ptr->orientationY()->setValue(dir[1]);
1083 //pannerNode_ptr->orientationZ()->setValue(dir[2]); //Q. should it be -ve
1084
1085
1086 float* xyz0 = pnode->__lastlocation.c;
1087 float xyz[3];
1088 memcpy(xyz, xyz0, 3 * sizeof(float));
1089 //xyz[0] = fabs(xyz[0]) < .001f ? 0.0f : xyz[0];
1090 //xyz[1] = fabs(xyz[1]) < .001f ? .001f : xyz[1];
1091 //std::cout << xyz[0] << ", " << xyz[1] << ", " << xyz[2] << "}," << std::endl;
1092 //static int once = 0;
1093 //if(!once)
1094 pannerNode_ptr->setPosition(xyz[0], xyz[1], xyz[2]);
1095 // once++;
1096 //pannerNode_ptr->positionX()->setValue(xyz[0]);
1097 //pannerNode_ptr->positionY()->setValue(xyz[1]);
1098 //pannerNode_ptr->positionZ()->setValue(xyz[2]);
1099 }
1100 break;
1101 case NODE_MovieTexture:
1102 case NODE_AudioClip:
1103 {
1104 struct X3D_AudioClip* pnode = (struct X3D_AudioClip*)node;
1105 std::shared_ptr<SampledAudioNode> audioClipNode;
1106 SampledAudioNode* audioClipNode_ptr;
1107 if (!srepn->inode) {
1108 //create labsound node
1109 audioClipNode = std::make_shared<SampledAudioNode>(context);
1110 {
1111 ContextRenderLock r(ac->context.get(), "ex_simple");
1112 audioClipNode->setBus(r, busses[srepn->ibuffer]);
1113 }
1114 ac->next_node++;
1115 ac->nodes[ac->next_node] = audioClipNode;
1116 ac->nodetype[ac->next_node] = NODE_AudioClip;
1117 srepn->inode = ac->next_node;
1118 srepn->icontext = icontext;
1119 //if (iparent.x)
1120 // libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);
1121
1122
1123 //audioClipNode->start((float)pnode->startTime); //do we need to convert to labsound absolute time from x3d absolute time?
1124 audioClipNode->schedule(0.0, -1); // -1 to loop forever
1125
1126
1127 }
1128 //copy changed values from x3d to labsound
1129 //audioClipNode = static_cast<std::shared_ptr<SampledAudioNode>>( ac->nodes[srepn->inode] );
1130 audioClipNode_ptr = static_cast<SampledAudioNode*>(ac->nodes[srepn->inode].get());
1131 // web3d time dependent nodes have an isActive state set elsewhere (freewrl do_AudioTick)
1132 // here we turn on / off the playback depending on isActive
1133 if (1) {
1134 if (pnode->isPaused == FALSE && pnode->__context_paused) {
1135 ac->context->resume();
1136 pnode->__context_paused = FALSE;
1137 }
1138 SchedulingState status = audioClipNode_ptr->playbackState();
1139 if (status == SchedulingState::PLAYING){
1140 //if (pnode->isActive == FALSE)
1141 if (pnode->isPaused == TRUE)
1142 audioClipNode_ptr->stop(0.0);
1143 if (pnode->isPaused == TRUE) {
1144 ac->context->suspend();
1145 pnode->__context_paused = TRUE;
1146 }
1147 }
1148 else if (status != SchedulingState::PLAYING && (pnode->isActive == TRUE))
1149 audioClipNode_ptr->start(0.0, pnode->loop ? -1 : 0);
1150
1151 bool isactive = audioClipNode_ptr->isPlayingOrScheduled();
1152 //audioClipNode_ptr->setLoop(pnode->loop ? true : false);
1153 if (!isactive && pnode->loop)
1154 audioClipNode_ptr->start(0.0f, -1);
1155 audioClipNode_ptr->playbackRate()->setValue(pnode->pitch * srepn->dopplerFactor);
1156 }
1157 // audioClipNode_ptr->gain()->setValue(pnode->gain);
1158 //copy outputs from labsound to x3d
1159 //pnode->duration_changed = audioClipNode_ptr->duration();
1160 }
1161 break;
	case NODE_BufferAudioSource:
	{
		// X3D BufferAudioSource -> LabSound SampledAudioNode playing the
		// pre-decoded bus busses[srepn->ibuffer] (filled when the url loads).
		struct X3D_BufferAudioSource* pnode = (struct X3D_BufferAudioSource*)node;
		std::shared_ptr<SampledAudioNode> audioSource;
		SampledAudioNode* audioSource_ptr;
		if (!srepn->ibuffer) break; //wait for url to load
		if (!srepn->inode) {
			//create labsound node
			audioSource = std::make_shared<SampledAudioNode>(context);
			{
				// setBus must be done under the render lock
				ContextRenderLock r(ac->context.get(), "ex_simple");
				audioSource->setBus(r, busses[srepn->ibuffer]);
			}
			ac->next_node++;
			ac->nodes[ac->next_node] = audioSource;
			ac->nodetype[ac->next_node] = NODE_BufferAudioSource;
			srepn->inode = ac->next_node;
			srepn->icontext = icontext;
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);


			//audioClipNode->start((float)pnode->startTime); //do we need to convert to labsound absolute time from x3d absolute time?
			//audioSource->schedule(0.0, -1); // -1 to loop forever


		}
		//copy changed values from x3d to labsound
		//audioClipNode = static_cast<std::shared_ptr<SampledAudioNode>>( ac->nodes[srepn->inode] );
		audioSource_ptr = static_cast<SampledAudioNode*>(ac->nodes[srepn->inode].get());
		// web3d time dependent nodes have an isActive state set elsewhere (freewrl do_AudioTick)
		// here we turn on / off the playback depending on isActive
		if (1) {
			// leaving the paused state: resume the shared context
			if (pnode->isPaused == FALSE && pnode->__context_paused) {
				ac->context->resume();
				pnode->__context_paused = FALSE;
			}
			SchedulingState status = audioSource_ptr->playbackState();
			if (status == SchedulingState::PLAYING) {
				if (pnode->isActive == FALSE)
					audioSource_ptr->stop(0.0);
				if (pnode->isPaused == TRUE) {
					// NOTE(review): suspends the shared context, which pauses
					// every node in it, not just this source - confirm intended
					ac->context->suspend();
					pnode->__context_paused = TRUE;
				}
			}
			else if (status != SchedulingState::PLAYING && (pnode->isActive == TRUE))
				audioSource_ptr->start(0.0, pnode->loop ? -1 : 0);

			// per-frame parameter refresh from the X3D fields
			audioSource_ptr->playbackRate()->setValue(pnode->playbackRate);
			audioSource_ptr->detune()->setValue(pnode->detune);

		}
		// audioClipNode_ptr->gain()->setValue(pnode->gain);
		//copy outputs from labsound to x3d
		//pnode->duration_changed = audioClipNode_ptr->duration();
	}
	break;
1220
1221
	case NODE_OscillatorSource:
	{
		// X3D OscillatorSource -> LabSound OscillatorNode; frequency, detune
		// and wave type are refreshed every visit, start/stop follows the
		// node's isActive / isPaused state.
		struct X3D_OscillatorSource* pnode = (struct X3D_OscillatorSource*)node;
		//if (!pnode->_self) {
		std::shared_ptr<OscillatorNode> oscillator;
		OscillatorNode* oscillator_ptr;
		// GainNode* gain_ptr;
		if (!srepn->inode) {
			// First visit: create the LabSound node and register it in the
			// per-context node table. (An output gain stage was tried and
			// abandoned - kept below for reference.)
			//gain node on output
			//std::shared_ptr<GainNode> gain;
			//gain = std::make_shared<GainNode>(context);
			//gain_ptr = gain.get();
			//ac->next_node++;
			//ac->nodes[ac->next_node] = gain;
			//ac->nodetype[ac->next_node] = NODE_Gain;
			//srepn->igain = ac->next_node;
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->igain, iparent.y, iparent.z);

			//create labsound node
			oscillator = std::make_shared<OscillatorNode>(context);
			ac->next_node++;
			ac->nodes[ac->next_node] = oscillator;
			ac->nodetype[ac->next_node] = NODE_OscillatorSource;
			srepn->inode = ac->next_node;
			srepn->icontext = icontext;

			//connect source node output to parent node input
			//libsound_connect0(icontext, srepn->igain, srepn->inode);
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);

			//oscillator->start(0.0f);
		}
		//copy changed values from x3d to labsound

		//gain_ptr = static_cast<GainNode*>(ac->nodes[srepn->igain].get());
		//gain_ptr->gain()->setValue(pnode->gain);
		oscillator_ptr = static_cast<OscillatorNode*>(ac->nodes[srepn->inode].get());

		oscillator_ptr->frequency()->setValue(pnode->frequency);
		//printf("from libsound updateNode0 OscillatorSource > detun %f\n", pnode->detune);
		oscillator_ptr->detune()->setValue(pnode->detune);
		// map the X3D type string to a LabSound OscillatorType
		//periodicWave_types
		OscillatorType wave_type = (OscillatorType) name_lookup(pnode->type->strptr, periodicWave_types);
		//switch (wave_type) {
		//case OscillatorType::SINE:
		//	oscillator_ptr->setType(OscillatorType::SINE); break;
		//case OscillatorType::SQUARE:
		//	oscillator_ptr->setType(OscillatorType::SQUARE); break;
		//case OscillatorType::SAWTOOTH:
		//	oscillator_ptr->setType(OscillatorType::SAWTOOTH); break;
		//case OscillatorType::TRIANGLE:
		//	oscillator_ptr->setType(OscillatorType::TRIANGLE); break;
		//case OscillatorType::CUSTOM:
		//	oscillator_ptr->setType(OscillatorType::CUSTOM); break;
		//default:
		//	oscillator_ptr->setType(OscillatorType::OSCILLATOR_NONE);
		//	break;
		//}
		oscillator_ptr->setType(wave_type);
		if(wave_type == OscillatorType::CUSTOM && pnode->periodicWave){
			// TODO: custom waveform (PeriodicWave child) not yet implemented here
		}

		// leaving the paused state: resume the shared context
		if (pnode->isPaused == FALSE && pnode->__context_paused) {
			ac->context->resume();
			pnode->__context_paused = FALSE;
		}
		SchedulingState status = oscillator_ptr->playbackState();
		if (status == SchedulingState::PLAYING) {
			if (pnode->isActive == FALSE)
				oscillator_ptr->stop(0.0);
			if (pnode->isPaused == TRUE) {
				// NOTE(review): suspends the shared context, which pauses
				// every node in it, not just this oscillator
				ac->context->suspend();
				pnode->__context_paused = TRUE;
			}
		}
		else if (status != SchedulingState::PLAYING && (pnode->isActive == TRUE))
			oscillator_ptr->start(0.0);


	}
	break;
1307 case NODE_PeriodicWave:
1308 {
1309 struct X3D_PeriodicWave* pnode = (struct X3D_PeriodicWave*)node;
1310 std::shared_ptr<PeriodicWave> pwave; //using an older term but equivalent WaveTable == PeriodicWave
1311 if (1) {
1312 if (iparent.p) {
1313 std::shared_ptr<AudioNode> oscillator = ac->nodes[iparent.p];
1314 OscillatorNode* oscillator_ptr =
1315 static_cast<OscillatorNode*>(oscillator.get());
1316 //periodicWave_types
1317 unsigned int wave_type = name_lookup(pnode->type->strptr, periodicWave_types);
1318 switch (wave_type) {
1319 case OscillatorType::SINE:
1320 oscillator_ptr->setType(OscillatorType::SINE); break;
1321 case OscillatorType::SQUARE:
1322 oscillator_ptr->setType(OscillatorType::SQUARE); break;
1323 case OscillatorType::SAWTOOTH:
1324 oscillator_ptr->setType(OscillatorType::SAWTOOTH); break;
1325 case OscillatorType::TRIANGLE:
1326 oscillator_ptr->setType(OscillatorType::TRIANGLE); break;
1327 case OscillatorType::CUSTOM:
1328 oscillator_ptr->setType(OscillatorType::CUSTOM); break;
1329 default:
1330 oscillator_ptr->setType(OscillatorType::OSCILLATOR_NONE);
1331 break;
1332 }
1333 if (pnode->optionsReal.n != 0) {
1335 //context.createPeriodicWave
1336 }
1337 //oscillator_ptr->setType(static_cast<OscillatorType>(wave_type));
1338 }
1339 }
1340 //copy changed values from x3d to labsound
1341 //copy outputs from labsound to x3d
1342
1343 }
1344
1345 case NODE_Gain:
1346 {
1347 struct X3D_Gain* pnode = (struct X3D_Gain*)node;
1348 std::shared_ptr<GainNode> gain;
1349 GainNode* gain_ptr;
1350 if (!srepn->inode) {
1351 //create labsound node
1352 gain = std::make_shared<GainNode>(context);
1353 gain_ptr = gain.get();
1354 ac->next_node++;
1355 ac->nodes[ac->next_node] = gain;
1356 ac->nodetype[ac->next_node] = NODE_Gain;
1357 srepn->inode = ac->next_node;
1358 srepn->icontext = icontext;
1359 if (nondefault_channelinterp) {
1360 ChannelInterpretation interp;
1361 ChannelCountMode cmode;
1362 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1363 gain_ptr->setChannelInterpretation(interp);
1364 {
1365 ContextGraphLock g(ac->context.get(), "ex_simple");
1366 gain_ptr->setChannelCountMode(g, cmode);
1367 }
1368 }
1369 //connect source node output to parent node input
1370 //if (iparent.x)
1371 // libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);
1372
1373 }
1374 gain_ptr = dynamic_cast<GainNode*>(ac->nodes[srepn->inode].get());
1375 //copy changed values from x3d to labsound
1376 gain_ptr->gain()->setValue(pnode->gain);
1377 //gain_ptr->silenceOutputs()
1378 //gain_ptr->unsilenceOutputs()
1379
1380 //copy outputs from labsound to x3d
1381
1382 }
1383 break;
	case NODE_ChannelSplitter:
	{
		// X3D ChannelSplitter -> LabSound ChannelSplitterNode with
		// pnode->channelCount outputs. Unlike the other sound nodes it can
		// have multiple downstream consumers; see the design notes below.
		struct X3D_ChannelSplitter* pnode = (struct X3D_ChannelSplitter*)node;
		std::shared_ptr<ChannelSplitterNode> splitter;
		ChannelSplitterNode* splitter_ptr;
		//GainNode* gain_ptr;
		if (!srepn->inode) {
			//std::shared_ptr<GainNode> gain;
			//gain = std::make_shared<GainNode>(context);
			//gain_ptr = gain.get();
			//ac->next_node++;
			//ac->nodes[ac->next_node] = gain;
			//ac->nodetype[ac->next_node] = NODE_Gain;
			//srepn->igain = ac->next_node;
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->igain, iparent.y, iparent.z);

			splitter = std::make_shared<ChannelSplitterNode>(context,pnode->channelCount);
			splitter_ptr = splitter.get();
			//splitter_ptr->output()
			ac->next_node++;
			ac->nodes[ac->next_node] = splitter;
			ac->nodetype[ac->next_node] = NODE_ChannelSplitter;
			srepn->inode = ac->next_node;
			srepn->icontext = icontext;
			if (nondefault_channelinterp) {
				// apply non-default channel interpretation / count mode;
				// the count mode change needs the graph lock
				ChannelInterpretation interp;
				ChannelCountMode cmode;
				getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
				splitter_ptr->setChannelInterpretation(interp);
				{
					ContextGraphLock g(ac->context.get(), "ex_simple");
					splitter_ptr->setChannelCountMode(g, cmode);
				}
			}
			//connect source node output to parent node input
			//libsound_connect0(icontext, srepn->igain, srepn->inode);
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);

			//splitter is the one sound node that doesn't follow single-parent-multiple-children paradigm:
			// - it has multiple parents, meaning the output can go to multiple nodes,
			/* for (int i = 0; i < std::min(pnode->channelCount,pnode->outputs.n); i++)
			{
				struct X3D_Node* onode = (struct X3D_Node*)pnode->outputs.p[i];
				struct X3D_SoundRep* srepo = getSoundRep(onode);
				if (srepo && srepo->inode)
					libsound_connect1(icontext, srepo->inode, srepn->inode,i);
			}*/

		}
		//Q. I want to add more connections on-the-fly, but how to prevent double connections on routine node recompile
		// and how to avoid missing a new connection when otherwise node doesn't need recompile?
		// Options:
		//	add __field to ChannelSplitter, and iterate over first, before adding connection
		//	add field to X3DSoundRep just for splitter connection tracking, (int,int) (parent,srcIndex)
		//if (iparent.x)
		//	libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);

		//gain_ptr = dynamic_cast<GainNode*>(ac->nodes[srepn->igain].get());
		//gain_ptr->gain()->setValue(pnode->gain);
		// per-frame: re-fetch the splitter pointer (no parameters to refresh yet)
		splitter_ptr = dynamic_cast<ChannelSplitterNode*>(ac->nodes[srepn->inode].get());
		//copy outputs from labsound to x3d

	}
	break;
	case NODE_ChannelSelector:
	{
		// No LabSound node is created here.
		//doesn't do anything except push a channel ID onto a stack in Component_Sound
	}
	break;
	case NODE_ChannelMerger:
	{
		// X3D ChannelMerger -> LabSound ChannelMergerNode with
		// pnode->channelCount inputs; created once, nothing refreshed per frame.
		struct X3D_ChannelMerger* pnode = (struct X3D_ChannelMerger*)node;
		std::shared_ptr<ChannelMergerNode> merger;
		ChannelMergerNode* merger_ptr;
		//GainNode* gain_ptr;
		if (!srepn->inode) {
			//std::shared_ptr<GainNode> gain;
			//gain = std::make_shared<GainNode>(context);
			//gain_ptr = gain.get();
			//ac->next_node++;
			//ac->nodes[ac->next_node] = gain;
			//ac->nodetype[ac->next_node] = NODE_Gain;
			//srepn->igain = ac->next_node;
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->igain, iparent.y, iparent.z);

			merger = std::make_shared<ChannelMergerNode>(context, pnode->channelCount);
			merger_ptr = merger.get();
			if (nondefault_channelinterp) {
				// apply non-default channel interpretation / count mode;
				// the count mode change needs the graph lock
				ChannelInterpretation interp;
				ChannelCountMode cmode;
				getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
				merger_ptr->setChannelInterpretation(interp);
				{
					ContextGraphLock g(ac->context.get(), "ex_simple");
					merger_ptr->setChannelCountMode(g, cmode);
				}
			}
			ac->next_node++;
			ac->nodes[ac->next_node] = merger;
			ac->nodetype[ac->next_node] = NODE_ChannelMerger;
			srepn->inode = ac->next_node;
			srepn->icontext = icontext;
			//connect source node output to parent node input
			//libsound_connect0(icontext, srepn->igain, srepn->inode);
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);

		}
		//gain_ptr = dynamic_cast<GainNode*>(ac->nodes[srepn->igain].get());
		//copy changed values from x3d to labsound
		//gain_ptr->gain()->setValue(pnode->gain);
		// per-frame: re-fetch the merger pointer (no parameters to refresh yet)
		merger_ptr = dynamic_cast<ChannelMergerNode*>(ac->nodes[srepn->inode].get());
		//copy outputs from labsound to x3d

	}
	break;
	case NODE_Delay:
	{
		// X3D Delay -> LabSound DelayNode; maxDelayTime is fixed at creation,
		// delayTime is refreshed every visit.
		struct X3D_Delay* pnode = (struct X3D_Delay*)node;
		std::shared_ptr<DelayNode> delay;
		DelayNode* delay_ptr;
		std::shared_ptr<GainNode> gain;
		//GainNode* gain_ptr;
		if (!srepn->inode) {
			//gain = std::make_shared<GainNode>(context);
			//gain_ptr = gain.get();
			//ac->next_node++;
			//ac->nodes[ac->next_node] = gain;
			//ac->nodetype[ac->next_node] = NODE_Gain;
			//srepn->igain = ac->next_node;
			//srepn->icontext = icontext;
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->igain, iparent.y, iparent.z);

			delay = std::make_shared<DelayNode>(context,pnode->maxDelayTime);
			delay_ptr = delay.get();
			ac->next_node++;
			ac->nodes[ac->next_node] = delay;
			ac->nodetype[ac->next_node] = NODE_Delay;
			srepn->inode = ac->next_node;
			srepn->icontext = icontext;
			if (nondefault_channelinterp) {
				// apply non-default channel interpretation / count mode;
				// the count mode change needs the graph lock
				ChannelInterpretation interp;
				ChannelCountMode cmode;
				getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
				delay_ptr->setChannelInterpretation(interp);
				{
					ContextGraphLock g(ac->context.get(), "ex_simple");
					delay_ptr->setChannelCountMode(g, cmode);
				}
			}
			//connect source node output to parent node input
			//libsound_connect0(icontext, srepn->igain, srepn->inode);
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);

		}
		//gain_ptr = dynamic_cast<GainNode*>(ac->nodes[srepn->igain].get());
		//copy changed values from x3d to labsound
		//gain_ptr->gain()->setValue(pnode->gain);
		// per-frame refresh: push the current delayTime into LabSound
		delay_ptr = dynamic_cast<DelayNode*>(ac->nodes[srepn->inode].get());
		delay_ptr->delayTime()->setFloat((float)pnode->delayTime,false);


	}
	break;
1565 case NODE_Analyser:
1566 {
1567 struct X3D_Analyser* pnode = (struct X3D_Analyser*)node;
1568 std::shared_ptr<AnalyserNode> analyser;
1569 AnalyserNode* analyser_ptr;
1570 //std::shared_ptr<GainNode> gain;
1571 //GainNode* gain_ptr;
1572 if (!srepn->inode) {
1574 //gain = std::make_shared<GainNode>(context);
1575 //gain_ptr = gain.get();
1576 //ac->next_node++;
1577 //ac->nodes[ac->next_node] = gain;
1578 //ac->nodetype[ac->next_node] = NODE_Gain;
1579 //srepn->igain = ac->next_node;
1580 //srepn->icontext = icontext;
1582 //if (iparent.x)
1583 // libsound_connect2(icontext, iparent.x, srepn->igain, iparent.y, iparent.z);
1584
1585 analyser = std::make_shared<AnalyserNode>(context,pnode->fftSize);
1586 analyser_ptr = analyser.get();
1587 ac->next_node++;
1588 ac->nodes[ac->next_node] = analyser;
1589 ac->nodetype[ac->next_node] = NODE_Analyser;
1590 srepn->inode = ac->next_node;
1591 srepn->icontext = icontext;
1592 if (nondefault_channelinterp) {
1593
1594 ChannelInterpretation interp;
1595 ChannelCountMode cmode;
1596 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1597 analyser_ptr->setChannelInterpretation(interp);
1598 {
1599 ContextGraphLock g(ac->context.get(), "ex_simple");
1600 analyser_ptr->setChannelCountMode(g, cmode);
1601 }
1602 }
1603 //connect source node output to parent node input
1604 //libsound_connect0(icontext, srepn->igain, srepn->inode);
1605 //if (iparent.x)
1606 // libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);
1607
1608
1609 }
1610 //gain_ptr = dynamic_cast<GainNode*>(ac->nodes[srepn->igain].get());
1612 //gain_ptr->gain()->setValue(pnode->gain);
1613 analyser_ptr = dynamic_cast<AnalyserNode*>(ac->nodes[srepn->inode].get());
1614 pnode->frequencyBinCount = (int)analyser_ptr->frequencyBinCount();
1615 //printf("start analyser bins=%d\n", pnode->frequencyBinCount);
1616
1617 analyser_ptr->setMaxDecibels(pnode->maxDecibels);
1618 analyser_ptr->setMinDecibels(pnode->minDecibels);
1619 analyser_ptr->setSmoothingTimeConstant(pnode->smoothingTimeConstant);
1620 {
1621 if (ac->bytearray == nullptr)
1622 ac->bytearray = std::make_shared<std::vector<std::uint8_t>>(4096);
1623 if (ac->floatarray == nullptr)
1624 ac->floatarray = std::make_shared<std::vector<std::float_t>>(4096);
1625
1626 std::vector<std::float_t>* floatarray = ac->floatarray.get();
1627 std::vector<std::uint8_t>* bytearray = ac->bytearray.get();
1628 //analyser_ptr->getByteFrequencyData(*bytearray);
1629 analyser_ptr->getFloatFrequencyData(*floatarray);
1630 //if (pnode->byteFrequencyData.n == 0){
1631 // pnode->byteFrequencyData.p = (int*)malloc(pnode->frequencyBinCount/4);
1632 // pnode->byteFrequencyData.n = pnode->frequencyBinCount / 4;
1633 //}
1634 if (pnode->floatFrequencyData.n == 0) {
1635 pnode->floatFrequencyData.p = (float*)malloc(pnode->frequencyBinCount * sizeof(std::float_t));
1636 pnode->floatFrequencyData.n = pnode->frequencyBinCount;
1637 }
1638
1639 std::uint8_t* p8 = (std::uint8_t*)pnode->byteFrequencyData.p;
1640 std::float_t* ff = (std::float_t*)pnode->floatFrequencyData.p;
1641 for (int i = 0; i < pnode->frequencyBinCount; i++) {
1642 //brute force. Is there a memcpy path?
1643 //p8[i] = (*bytearray)[i];
1644 ff[i] = (*floatarray)[i];
1645 }
1646 //analyser_ptr->getByteTimeDomainData(*bytearray);
1647 analyser_ptr->getFloatTimeDomainData(*floatarray);
1648 //if (pnode->byteTimeDomainData.n == 0) {
1649 // pnode->byteTimeDomainData.p = (int*)malloc(pnode->frequencyBinCount / 4);
1650 // pnode->byteTimeDomainData.n = pnode->frequencyBinCount / 4;
1651 //}
1652 if (pnode->floatTimeDomainData.n == 0) {
1653 pnode->floatTimeDomainData.p = (float*)malloc(pnode->frequencyBinCount * sizeof(std::float_t));
1654 pnode->floatTimeDomainData.n = pnode->frequencyBinCount;
1655 }
1656
1657 //std::uint8_t* p8t = (std::uint8_t*)pnode->byteTimeDomainData.p;
1658 std::float_t* fft = (std::float_t*)pnode->floatTimeDomainData.p;
1659 for (int i = 0; i < pnode->frequencyBinCount; i++) {
1660 //brute force. Is there a memcpy path?
1661 //p8t[i] = (*bytearray)[i];
1662 fft[i] = (*floatarray)[i];
1663 }
1664
1665 }
1666 //printf("end analyser\n");
1667 }
1668 break;
1669 case NODE_BiquadFilter:
1670 {
1671 static struct key_name biquad_types[] = {
1672 {FilterType::ALLPASS, "ALLPASS"},
1673 {FilterType::BANDPASS, "BANDPASS"},
1674 {FilterType::FILTER_NONE, "NONE"},
1675 {FilterType::HIGHPASS, "HIGHPASS"},
1676 {FilterType::HIGHSHELF, "HIGHSHELF"},
1677 {FilterType::LOWPASS, "LOWPASS"},
1678 {FilterType::LOWSHELF, "LOWSHELF"},
1679 {FilterType::NOTCH, "NOTCH"},
1680 {FilterType::NOTCH, "PEAKING"},
1681 };
1682
1683 struct X3D_BiquadFilter* pnode = (struct X3D_BiquadFilter*)node;
1684 std::shared_ptr<BiquadFilterNode> biquad;
1685 BiquadFilterNode* biquad_ptr;
1686 //std::shared_ptr<GainNode> gain;
1687 //GainNode* gain_ptr;
1688 if (!srepn->inode) {
1689 //create labsound node
1690 //gain = std::make_shared<GainNode>(context);
1691 //gain_ptr = gain.get();
1692 //ac->next_node++;
1693 //ac->nodes[ac->next_node] = gain;
1694 //ac->nodetype[ac->next_node] = NODE_Gain;
1695 //srepn->igain = ac->next_node;
1696 //srepn->icontext = icontext;
1698 //if (iparent.x)
1699 // libsound_connect2(icontext, iparent.x, srepn->igain, iparent.y, iparent.z);
1700
1701 biquad = std::make_shared<BiquadFilterNode>(context);
1702 biquad_ptr = biquad.get();
1703 if (nondefault_channelinterp) {
1704
1705 ChannelInterpretation interp;
1706 ChannelCountMode cmode;
1707 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1708 biquad_ptr->setChannelInterpretation(interp);
1709 {
1710 ContextGraphLock g(ac->context.get(), "ex_simple");
1711 biquad_ptr->setChannelCountMode(g, cmode);
1712 }
1713 }
1714 ac->next_node++;
1715 ac->nodes[ac->next_node] = biquad;
1716 ac->nodetype[ac->next_node] = NODE_BiquadFilter;
1717 srepn->inode = ac->next_node;
1718 srepn->icontext = icontext;
1719 //connect source node output to parent node input
1720 //libsound_connect0(icontext, srepn->igain, srepn->inode);
1721 //if (iparent.x)
1722 // libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);
1723
1724
1725 }
1726 //gain_ptr = dynamic_cast<GainNode*>(ac->nodes[srepn->igain].get());
1728 //gain_ptr->gain()->setValue(pnode->gain);
1729 biquad_ptr = dynamic_cast<BiquadFilterNode*>(ac->nodes[srepn->inode].get());
1730 FilterType biquad_type = (FilterType)name_lookup(pnode->type->strptr, biquad_types);
1731 biquad_ptr->setType(biquad_type);
1732 biquad_ptr->detune()->setValue(pnode->detune);
1733 biquad_ptr->frequency()->setValue(pnode->frequency);
1734 biquad_ptr->q()->setValue(pnode->qualityFactor); //.Q is quality factor
1735 }
1736 break;
	case NODE_DynamicsCompressor:
	{
		// X3D DynamicsCompressor -> LabSound DynamicsCompressorNode.
		// First visit creates the LabSound node and registers it in the audio
		// context's node table; every visit (re)copies the X3D fields onto the
		// LabSound AudioParams so routed/scripted changes take effect.
		struct X3D_DynamicsCompressor* pnode = (struct X3D_DynamicsCompressor*)node;
		std::shared_ptr<DynamicsCompressorNode> dynamics;
		DynamicsCompressorNode* dynamics_ptr;
		//std::shared_ptr<GainNode> gain;
		//GainNode* gain_ptr;
		if (!srepn->inode) {
			//gain = std::make_shared<GainNode>(context);
			//gain_ptr = gain.get();
			//ac->next_node++;
			//ac->nodes[ac->next_node] = gain;
			//ac->nodetype[ac->next_node] = NODE_Gain;
			//srepn->igain = ac->next_node;
			//srepn->icontext = icontext;
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->igain, iparent.y, iparent.z);

			dynamics = std::make_shared<DynamicsCompressorNode>(context);
			dynamics_ptr = dynamics.get();
			if (nondefault_channelinterp) {
				// Apply non-default channelInterpretation / channelCountMode;
				// setChannelCountMode requires the context graph lock.
				ChannelInterpretation interp;
				ChannelCountMode cmode;
				getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
				dynamics_ptr->setChannelInterpretation(interp);
				{
					ContextGraphLock g(ac->context.get(), "ex_simple");
					dynamics_ptr->setChannelCountMode(g, cmode);
				}
			}
			// Register the node so later frames can find it via srepn->inode.
			ac->next_node++;
			ac->nodes[ac->next_node] = dynamics;
			ac->nodetype[ac->next_node] = NODE_DynamicsCompressor;
			srepn->inode = ac->next_node;
			srepn->icontext = icontext;
			//connect source node output to parent node input
			//libsound_connect0(icontext, srepn->igain, srepn->inode);
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);


		}
		//gain_ptr = dynamic_cast<GainNode*>(ac->nodes[srepn->igain].get());
		//gain_ptr->gain()->setValue(pnode->gain);
		// Copy the (possibly script-updated) X3D fields onto the LabSound params.
		// attack/release are doubles on the X3D side, floats in LabSound.
		dynamics_ptr = dynamic_cast<DynamicsCompressorNode*>(ac->nodes[srepn->inode].get());
		dynamics_ptr->attack()->setValue((float)pnode->attack);
		dynamics_ptr->knee()->setValue(pnode->knee);
		dynamics_ptr->ratio()->setValue(pnode->ratio);
		dynamics_ptr->release()->setValue((float)pnode->release);
		dynamics_ptr->threshold()->setValue(pnode->threshold);
		// reduction flows the other way: read the current value back from
		// LabSound into the X3D node (compressor's applied gain reduction).
		pnode->reduction = dynamics_ptr->reduction()->value();

	}
	break;
1795 case NODE_WaveShaper:
1796 {
1797 struct X3D_WaveShaper* pnode = (struct X3D_WaveShaper*)node;
1798 std::shared_ptr<WaveShaperNode> wave;
1799 WaveShaperNode* wave_ptr;
1800 //std::shared_ptr<GainNode> gain;
1801 //GainNode* gain_ptr;
1802 if (!srepn->inode) {
1804 //gain = std::make_shared<GainNode>(context);
1805 //gain_ptr = gain.get();
1806 //ac->next_node++;
1807 //ac->nodes[ac->next_node] = gain;
1808 //ac->nodetype[ac->next_node] = NODE_Gain;
1809 //srepn->igain = ac->next_node;
1810 //srepn->icontext = icontext;
1812 //if (iparent.x)
1813 // libsound_connect2(icontext, iparent.x, srepn->igain, iparent.y, iparent.z);
1814
1815 wave = std::make_shared<WaveShaperNode>(context);
1816 wave_ptr = wave.get();
1817 if (nondefault_channelinterp) {
1818
1819 ChannelInterpretation interp;
1820 ChannelCountMode cmode;
1821 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1822 wave_ptr->setChannelInterpretation(interp);
1823 {
1824 ContextGraphLock g(ac->context.get(), "ex_simple");
1825 wave_ptr->setChannelCountMode(g, cmode);
1826 }
1827 }
1828 ac->next_node++;
1829 ac->nodes[ac->next_node] = wave;
1830 ac->nodetype[ac->next_node] = NODE_WaveShaper;
1831 srepn->inode = ac->next_node;
1832 srepn->icontext = icontext;
1833 //connect source node output to parent node input
1834 //libsound_connect0(icontext, srepn->igain, srepn->inode);
1835 //if (iparent.x)
1836 // libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);
1837
1838 }
1839 //gain_ptr = dynamic_cast<GainNode*>(ac->nodes[srepn->igain].get());
1841 //gain_ptr->gain()->setValue(pnode->gain);
1842 wave_ptr = dynamic_cast<WaveShaperNode*>(ac->nodes[srepn->inode].get());
1843 std::vector<float> curve(pnode->curve.n);
1844 printf("pnode->curve.n %d p[0] %f p[44100-1] %f", pnode->curve.n, pnode->curve.p[0], pnode->curve.p[pnode->curve.n-1]);
1845 for (int i = 0; i < pnode->curve.n; i++)
1846 curve[i] = pnode->curve.p[i];
1847 printf("curve[0] %f curve[-1] %f oversampe %s", curve[0], curve[curve.size() - 1], pnode->oversample->strptr);
1848 wave_ptr->setCurve(curve);
1849 OverSampleType oversample_type = (OverSampleType)name_lookup(pnode->oversample->strptr, waveshaper_oversampling_types);
1850 wave_ptr->setOversample(oversample_type);
1851
1852 }
1853 break;
	case NODE_Convolver:
	{
		// X3D Convolver -> LabSound ConvolverNode.
		// First visit creates the LabSound node and registers it in the audio
		// context's node table; every visit (re)copies the X3D fields onto the
		// LabSound node.
		struct X3D_Convolver * pnode = (struct X3D_Convolver*)node;
		std::shared_ptr<ConvolverNode> convolver;
		ConvolverNode* convolver_ptr;
		//std::shared_ptr<GainNode> gain;
		//GainNode* gain_ptr;
		if (!srepn->inode) {
			//gain = std::make_shared<GainNode>(context);
			//gain_ptr = gain.get();
			//ac->next_node++;
			//ac->nodes[ac->next_node] = gain;
			//ac->nodetype[ac->next_node] = NODE_Gain;
			//srepn->igain = ac->next_node;
			//srepn->icontext = icontext;
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->igain, iparent.y, iparent.z);

			convolver = std::make_shared<ConvolverNode>(context);
			convolver_ptr = convolver.get();
			if (nondefault_channelinterp) {
				// Apply non-default channelInterpretation / channelCountMode;
				// setChannelCountMode requires the context graph lock.
				ChannelInterpretation interp;
				ChannelCountMode cmode;
				getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
				convolver_ptr->setChannelInterpretation(interp);
				{
					ContextGraphLock g(ac->context.get(), "ex_simple");
					convolver_ptr->setChannelCountMode(g, cmode);
				}
			}
			// Register the node so later frames can find it via srepn->inode.
			ac->next_node++;
			ac->nodes[ac->next_node] = convolver;
			ac->nodetype[ac->next_node] = NODE_Convolver;
			srepn->inode = ac->next_node;
			srepn->icontext = icontext;
			//connect source node output to parent node input
			//libsound_connect0(icontext, srepn->igain, srepn->inode);
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);

		}
		//gain_ptr = dynamic_cast<GainNode*>(ac->nodes[srepn->igain].get());
		//gain_ptr->gain()->setValue(pnode->gain);
		convolver_ptr = dynamic_cast<ConvolverNode*>(ac->nodes[srepn->inode].get());
		convolver_ptr->setNormalize(pnode->normalize);
		if (pnode->buffer) {
			// The impulse response comes from a child AudioBuffer node;
			// __sourceNumber > 0 presumably means the buffer's audio has been
			// loaded into the busses[] table elsewhere — TODO confirm against
			// the buffer-loading code.
			struct X3D_AudioBuffer* abuf = (struct X3D_AudioBuffer*)pnode->buffer;
			if (abuf->__sourceNumber > 0)
				convolver_ptr->setImpulse(busses[abuf->__sourceNumber]); // or srep->ibuffer
		}
	}
	break;
	case NODE_MicrophoneSource:
	{
		// X3D MicrophoneSource -> LabSound AudioHardwareInputNode (audio-in).
		// First visit creates the input node from the destination device's
		// source provider and registers it in the context's node table.
		struct X3D_MicrophoneSource* pnode = (struct X3D_MicrophoneSource*)node;
		std::shared_ptr<AudioHardwareInputNode> input;
		AudioHardwareInputNode* input_ptr;
		if (!srepn->inode) {
			//create labsound node
			{
				// Constructed under the render lock since it taps the device's
				// live input source provider.
				ContextRenderLock r(ac->context.get(), "microphone");
				//input = lab::MakeAudioHardwareInputNode(r);
				std::shared_ptr<AudioHardwareInputNode> inputNode(
					new AudioHardwareInputNode(*ac->context.get(), ac->context.get()->destinationNode()->device()->sourceProvider()));
				input = inputNode;
				//ac->context.get()->connect(ac->context.get()->destinationNode(), inputNode, 0, 0);
			}

			// Register the node so later frames can find it via srepn->inode.
			ac->next_node++;
			ac->nodes[ac->next_node] = input;
			ac->nodetype[ac->next_node] = NODE_MicrophoneSource;
			srepn->inode = ac->next_node;
			srepn->icontext = icontext;
			//if (iparent.x)
			//	libsound_connect2(icontext, iparent.x, srepn->inode, iparent.y, iparent.z);
		}
		//copy changed values from x3d to labsound
		// NOTE(review): input_ptr is currently unused — no per-frame fields are
		// copied yet (unlike the other cases, which update AudioParams here).
		input_ptr = static_cast<AudioHardwareInputNode*>(ac->nodes[srepn->inode].get());
	}
	break;
1938
1939 //case NODE_AudioDestination:
1940 //{
	// struct X3D_AudioDestination* pnode = (struct X3D_AudioDestination*)node;
1942 // if (!srepn->inode) {
1943 // //create labsound node
1944 // }
1945
1946 //}
1947 //break;
1948
1949 default:
1950 return;
1951
1952 }
1953 }
1954
1955
1956#ifdef __cplusplus
1957}
1958#endif
1959
1960/*
	Jan 3, 2023 version 4 has some sound_control working
1962 - need the spatial / panner node now
1963 Dec 25, 2022 prototype III
1964 0) make an ls_create_context function and cast back to void to return to C.
1965 1) make an ls_render_node for each of 22 x3d audio nodes,
1966 and in each specific ls_render_node create the appropriate labsound node type,
1967 and cast back to void* and store in the x3d node
1968 2) make a ls_connect(ls_context, x3dnode1, x3dnode2)
	in the function, switch-case on x3dnode type to cast void* to appropriate labsound node type
1970 then call the labsound context.connect(labsound_node1, labsound_node2);
1971 */
1972// June 22, 2020 - the above is a prototype I,
1973// proposed prototype II:
1974// 1) create_context launches a worker thread per context
1975// the worker thread is in C++ and holds smart pointer variables
1976// 2) the worker thread loops, and once per loop waits on a condition variable
1977// 3) once per rendering frame, the browser render_context(node context) sets the condition variable
1978// 4) if first time, the context worker thread creates all the labsound nodes and connects them
1979// to match the x3d declared context and connected child nodes
1980// 5) the subsequent frames, if any field has been changed on the x3d audio nodes,
// the condition variable is set and the worker thread updates the changed fields on
1982// the labsound nodes
1983// 6) at end of program run, the condition variable is set and the worker does an exit,
1984// triggering garbage collection of smart pointer variables
1985//
1986// https://en.cppreference.com/w/cpp/thread/condition_variable
1987// -shows std::condition_variable and thread.
1988// But there's a web3d difference between SpatializedSound and SoundEffect nodes:
1989// SoundEffects
1990// - may not make the June30 cutoff for v4, but SpatializedSound will
1991// - have a competing method: a script method, like a script node,
// - with webAudio equivalent api exposed to the js engine for scripting
1993// I have no idea how hard it would be to expose labsound api to js.
1994// - H0: brutal, like nothing we've ever done before
1995// - H1: routine copy and paste from webAudio implementations
1996// - H2: much like exposing to scengraph rendering - same deal
1997//