node free; pitch shift; time scale

John Alanbrook 2023-11-29 04:48:32 +00:00
parent e092599816
commit f47b6ece21
12 changed files with 590 additions and 54 deletions

View file

@ -76,7 +76,10 @@ else
endif
endif
CPPFLAGS += -DHAVE_CEIL -DCP_USE_CGTYPES=0 -DCP_USE_DOUBLES=0 -DTINYSPLINE_FLOAT_PRECISION -DHAVE_FLOOR -DHAVE_FMOD -DHAVE_LRINT -DHAVE_LRINTF $(includeflag) -MD $(WARNING_FLAGS) -I. -DVER=\"$(VER)\" -DINFO=\"$(INFO)\"
CPPFLAGS += -DHAVE_CEIL -DCP_USE_CGTYPES=0 -DCP_USE_DOUBLES=0 -DTINYSPLINE_FLOAT_PRECISION -DHAVE_FLOOR -DHAVE_FMOD -DHAVE_LRINT -DHAVE_LRINTF $(includeflag) -MD $(WARNING_FLAGS) -I. -DVER=\"$(VER)\" -DINFO=\"$(INFO)\" -DENABLE_SINC_MEDIUM_CONVERTER -DENABLE_SINC_FAST_CONVERTER
# ENABLE_SINC_[BEST|FAST|MEDIUM]_CONVERTER
# default, fast and medium available in game at runtime; best available in editor
PKGCMD = tar --directory $(BIN) --exclude="./*.a" --exclude="./obj" -czf $(DISTDIR)/$(DIST) .
ZIP = .tar.gz

docs/sound.md — new file (+30 lines)
View file

@ -0,0 +1,30 @@
# SOUND
Primum's sound system is well integrated and robust. It is designed with flexibility in mind, so that sounds can easily change in response to gameplay.
At the highest level, audio sources feed into a series of nodes, which eventually output to the mixer. All DSP is done in floating point.
The game should specify SAMPLERATE, BUF_FRAMES, and CHANNELS:
- SAMPLERATE: samples per second.
- CHANNELS: number of output channels.
- BUF_FRAMES: number of frames to keep in the buffer. Smaller means lower latency; too small can cause choppy audio.
## Audio sources
Audio sources include wav files, midi files, mod files, etc.
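For instance, a tracker module or a MIDI file (rendered through a soundfont) can be opened with the DSP helpers from dsp.js; this is a minimal sketch, and the file paths are hypothetical:

```js
var song = DSP.mod("music/track.mod");                 // tracker module source
var theme = DSP.midi("music/theme.mid", "sf/gm.sf2");  // MIDI source rendered through a soundfont
song.plugin(Sound.bus.master);                         // plug the module straight into the master node
```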
## Audio instances
Audio sources can be played multiple times once loaded into the game. Each instance is an independent playback of its source, and is itself a node that can be plugged into the graph.
## DSPs / nodes
Nodes are how audio ends up coming out of the player's speakers. Only one node is present by default at runtime: the master node, which outputs the mix of its inputs to the computer's speakers. It has 256 inputs.
Each node can have any number of inputs, defined when the node is created.
Nodes are run in a pull fashion, starting with the master node: it checks its inputs and pulls audio forward from each one.
Audio instances are a type of node with a single output, and can be fed directly into the master node.
A node has inputs and outputs. All inputs into a node are mixed together and then processed by the node, which might clip the signal, apply a filter, or do any number of other things.
## Scripting
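The node graph is assembled from script. The snippet below is a minimal sketch using the helpers this commit adds to dsp.js; the node choices and parameter values are illustrative, and `node.plugin(to)` is assumed to plug the node into `to`, mirroring `plugin_node(from, to)` on the C side:

```js
var noise = DSP.pink();            // pink-noise generator (new in this commit)
var shift = DSP.pitchshift(2);     // value forwarded to smbPitchShift: 0.5 = octave down, 2 = octave up
var filter = DSP.lpf(2000);        // low-pass filter with a 2 kHz cutoff

noise.plugin(shift);               // noise feeds the pitch shifter...
shift.plugin(filter);              // ...which feeds the filter...
filter.plugin(Sound.bus.master);   // ...which feeds the master node

filter.volume = 0.5;               // clamped to [0,1]
filter.pan = -0.25;                // clamped to [-1,1]
```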

View file

@ -7,9 +7,11 @@ var dsp_node = {
id: undefined,
get volume() { return cmd(175, this.id); },
set volume(x) { cmd(176, this.id,x); },
set volume(x) { x = Math.clamp(x,0,1); cmd(176, this.id,x); },
get db() { return 20*Math.log10(Math.abs(this.volume)); },
set db(x) { x = Math.clamp(x,-100,0); this.volume = Math.pow(10, x/20); },
get pan() { return cmd(178,this.id); },
set pan(x) { cmd(179,this.id,x); },
set pan(x) { x = Math.clamp(x,-1,1); cmd(179,this.id,x); },
off(t) { cmd(183, this.id, t); }, /* True to turn off */
bypass(t) { cmd(184, this.id, t); }, /* True to bypass filter effect */
unplug() { cmd(164, this.id); },
@ -26,6 +28,8 @@ var dsp_source = Object.copy(dsp_node,{
end(){},
get loop() { return cmd(194,this.id); },
set loop(x) { cmd(195,this.id, x);},
get timescale() { return cmd(201,this.id); },
set timescale(x) { cmd(202,this.id,x); },
get frame() { return cmd(196,this.id); },
set frame(x) { cmd(199, this.id, x); },
frames() { return cmd(197,this.id); },
@ -68,6 +72,14 @@ var DSP = {
delay(secs,decay) {
return dsp_node.make(cmd(185, secs, decay));
},
fwd_delay(secs, decay) {
return dsp_node.make(cmd(207,secs,decay));
},
allpass(secs, decay) {
var fwd = DSP.fwd_delay(secs,-decay);
var fbk = DSP.delay(secs,decay);
fwd.plugin(fbk);
},
lpf(f) {
return dsp_node.make(cmd(186,f));
},
@ -77,6 +89,9 @@ var DSP = {
mod(path) {
return dsp_node.make(cmd(188,path));
},
midi(midi,sf) {
return dsp_node.make(cmd(206,midi,sf));
},
crush(rate, depth) {
return dsp_node.make(cmd(189,rate,depth));
},
@ -89,6 +104,18 @@ var DSP = {
noise_gate(floor) {
return dsp_node.make(cmd(192,floor));
},
pitchshift(octaves) {
return dsp_node.make(cmd(200,octaves));
},
noise() {
return dsp_node.make(cmd(203));
},
pink() {
return dsp_node.make(cmd(204));
},
red() {
return dsp_node.make(cmd(205));
},
};
Sound.bus.master = dsp_node.make(cmd(180));

View file

@ -75,7 +75,7 @@ struct datastream *ds_openvideo(const char *path)
});
ds->ring = ringnew(ds->ring, 8192);
plugin_node(make_node(ds, soundstream_fillbuf), masterbus);
plugin_node(make_node(ds, soundstream_fillbuf, NULL), masterbus);
plm_set_video_decode_callback(ds->plm, render_frame, ds);
plm_set_audio_decode_callback(ds->plm, render_audio, ds);

View file

@ -1218,7 +1218,7 @@ JSValue duk_cmd(JSContext *js, JSValueConst this, int argc, JSValueConst *argv)
ret = ptr2js(masterbus);
break;
case 181:
ret = ptr2js(make_node(NULL,NULL));
ret = ptr2js(make_node(NULL,NULL,NULL));
break;
case 182:
str = js2str(argv[1]);
@ -1277,6 +1277,33 @@ JSValue duk_cmd(JSContext *js, JSValueConst this, int argc, JSValueConst *argv)
case 199:
((sound*)((dsp_node*)js2ptr(argv[1]))->data)->frame = js2number(argv[2]);
break;
case 200:
ret = ptr2js(dsp_pitchshift(js2number(argv[1])));
break;
case 201:
ret = num2js(((sound*)((dsp_node*)js2ptr(argv[1]))->data)->timescale);
break;
case 202:
YughWarn("%g", js2number(argv[2]));
((sound*)((dsp_node*)js2ptr(argv[1]))->data)->timescale = js2number(argv[2]);
break;
case 203:
ret = ptr2js(dsp_whitenoise());
break;
case 204:
ret = ptr2js(dsp_pinknoise());
break;
case 205:
ret = ptr2js(dsp_rednoise());
break;
case 206:
str = js2str(argv[1]);
str2 = js2str(argv[2]);
ret = ptr2js(dsp_midi(str, make_soundfont(str2)));
break;
case 207:
ret = ptr2js(dsp_fwd_delay(js2number(argv[1]), js2number(argv[2])));
break;
}
if (str)

View file

@ -7,11 +7,22 @@
#include "iir.h"
#include "log.h"
#include "stb_ds.h"
#include "smbPitchShift.h"
#define PI 3.14159265
dsp_node *masterbus = NULL;
void iir_free(struct dsp_iir *iir)
{
free(iir->a);
free(iir->b);
free(iir->x);
free(iir->y);
free(iir);
}
void interleave(soundbyte *a, soundbyte *b, soundbyte *stereo, int frames)
{
for (int i = 0; i < frames; i++) {
@ -20,6 +31,13 @@ void interleave(soundbyte *a, soundbyte *b, soundbyte *stereo, int frames)
}
}
void deinterleave(soundbyte *stereo, soundbyte *out, int frames, int channels, int chout)
{
chout--;
for (int i = 0; i < frames; i++)
out[i] = stereo[i*channels+chout];
}
void mono_to_stero(soundbyte *a, soundbyte *stereo, int frames)
{
interleave(a,a,stereo, frames);
@ -37,7 +55,7 @@ void mono_expand(soundbyte *buffer, int to, int frames)
dsp_node *dsp_mixer_node()
{
return make_node(NULL, NULL);
return make_node(NULL, NULL, NULL);
}
void dsp_init()
@ -75,7 +93,7 @@ void filter_am_mod(dsp_node *mod, soundbyte *buffer, int frames)
dsp_node *dsp_am_mod(dsp_node *mod)
{
return make_node(mod, filter_am_mod);
return make_node(mod, filter_am_mod, node_free);
}
/* Add b into a */
@ -120,8 +138,7 @@ void dsp_node_run(dsp_node *node)
}
}
dsp_node *make_node(void *data, void (*proc)(void *in, soundbyte *out, int samples))
dsp_node *make_node(void *data, void (*proc)(void *data, soundbyte *out, int samples), void (*fr)(void *data))
{
dsp_node *self = malloc(sizeof(dsp_node));
memset(self, 0, sizeof(*self));
@ -206,7 +223,7 @@ dsp_node *dsp_phasor(float amp, float freq, float (*filter)(float))
p->freq = freq;
p->phase = 0;
p->filter = filter;
return make_node(p, filter_phasor);
return make_node(p, filter_phasor, NULL);
}
void filter_rectify(void *data, soundbyte *out, int n)
@ -216,7 +233,7 @@ void filter_rectify(void *data, soundbyte *out, int n)
dsp_node *dsp_rectify()
{
return make_node(NULL, filter_rectify);
return make_node(NULL, filter_rectify, NULL);
}
soundbyte sample_whitenoise()
@ -232,32 +249,26 @@ void gen_whitenoise(void *data, soundbyte *out, int n)
dsp_node *dsp_whitenoise()
{
return make_node(NULL, gen_whitenoise);
return make_node(NULL, gen_whitenoise, NULL);
}
void gen_pinknoise(void *data, soundbyte *out, int n)
void gen_pinknoise(struct dsp_iir *pink, soundbyte *out, int n)
{
double a[7] = {1.0, 0.0555179, 0.0750759, 0.1538520, 0.3104856, 0.5329522, 0.0168980};
double b[7] = {0.99886, 0.99332, 0.969, 0.8665, 0.55, -0.7616, 0.115926};
gen_whitenoise(NULL, out, n);
for (int i = 0; i < n; i++) {
double pink;
double white = sample_whitenoise();
for (int k = 0; k < 5; k++) {
b[k] = a[k]*b[k] + white * b[k];
pink += b[k];
}
pink += b[5] + white*0.5362;
b[5] = white*0.115926;
out[i] = pink;
for (int i = 0; i < n*CHANNELS; i++) {
soundbyte sum = 0;
for (int j = 0; j < 6; j++) {
pink->x[j] = pink->x[j]*pink->b[j] + out[i]*pink->a[j];
sum += pink->x[j];
}
mono_expand(out,CHANNELS,n);
/*
* The above is a loopified version of this
* https://www.firstpr.com.au/dsp/pink-noise/
pink->x[6] = out[i] * 0.115926;
out[i] = sum + out[i] * 0.5362 + pink->x[6];
out[i] *= 0.11;
}
/* * https://www.firstpr.com.au/dsp/pink-noise/
b0 = 0.99886 * b0 + white * 0.0555179;
b1 = 0.99332 * b1 + white * 0.0750759;
b2 = 0.96900 * b2 + white * 0.1538520;
@ -271,7 +282,69 @@ void gen_pinknoise(void *data, soundbyte *out, int n)
dsp_node *dsp_pinknoise()
{
return make_node(NULL, gen_pinknoise);
struct dsp_iir *pink = malloc(sizeof(*pink));
*pink = make_iir(6);
float pinka[7] = {0.0555179, 0.0750759, 0.1538520, 0.3104856, 0.5329522, -0.0168980, 0.115926};
float pinkb[7] = {0.99886, 0.99332, 0.969, 0.8665, 0.55, -0.7616, 0.115926};
memcpy(pink->a, pinka, 7*sizeof(float));
memcpy(pink->b, pinkb, 7*sizeof(float));
return make_node(pink, gen_pinknoise, iir_free);
}
void filter_rednoise(soundbyte *last, soundbyte *out, int frames)
{
gen_whitenoise(NULL, out, frames);
for (int i = 0; i < frames*CHANNELS; i++) {
out[i] = (*last + (0.02*out[i]))/1.02;
*last = out[i];
out[i] *= 3.5;
}
}
dsp_node *dsp_rednoise()
{
soundbyte *last = malloc(sizeof(soundbyte));
*last = 0;
return make_node(last,filter_rednoise, NULL);
}
void filter_pitchshift(float *octaves, soundbyte *buffer, int frames)
{
soundbyte ch1[frames];
for (int i = 0; i < frames; i++)
ch1[i] = buffer[i*CHANNELS];
smbPitchShift(*octaves, frames, 1024, 4, SAMPLERATE, ch1, buffer);
mono_expand(buffer, CHANNELS, frames);
}
dsp_node *dsp_pitchshift(float octaves)
{
float *oct = malloc(sizeof(float));
*oct = octaves;
return make_node(oct, filter_pitchshift, NULL);
}
struct timescale
{
float rate;
SRC_STATE *src;
};
static long *src_cb(struct timescale *ts, float **data)
{
}
void filter_timescale(struct timescale *ts, soundbyte *buffer, int frames)
{
}
dsp_node *dsp_timescale(float scale)
{
struct timescale *ts = malloc(sizeof(*ts));
ts->rate = scale;
ts->src = src_callback_new(src_cb, SRC_SINC_FASTEST, scale, NULL, ts);
return make_node(ts, filter_timescale, NULL);
}
soundbyte iir_filter(struct dsp_iir iir, soundbyte val)
@ -303,18 +376,19 @@ void filter_iir(struct dsp_iir *iir, soundbyte *buffer, int frames)
}
}
dsp_node *dsp_lpf(float freq)
{
struct dsp_iir *iir = malloc(sizeof(*iir));
*iir = bqlp_dcof(2*freq/SAMPLERATE, 5);
return make_node(iir, filter_iir);
return make_node(iir, filter_iir, iir_free);
}
dsp_node *dsp_hpf(float freq)
{
struct dsp_iir *iir = malloc(sizeof(*iir));
*iir = bqhp_dcof(2*freq/SAMPLERATE,5);
return make_node(iir, filter_iir);
return make_node(iir, filter_iir, iir_free);
}
void filter_delay(delay *d, soundbyte *buf, int frames)
@ -325,6 +399,12 @@ void filter_delay(delay *d, soundbyte *buf, int frames)
}
}
void delay_free(delay *d)
{
ringfree(d->ring);
free(d);
}
dsp_node *dsp_delay(double sec, double decay)
{
delay *d = malloc(sizeof(*d));
@ -333,7 +413,27 @@ dsp_node *dsp_delay(double sec, double decay)
d->ring = NULL;
d->ring = ringnew(d->ring, sec*CHANNELS*SAMPLERATE*2); /* Circular buffer size is enough to have the delay */
ringheader(d->ring)->write += CHANNELS*SAMPLERATE*sec;
return make_node(d, filter_delay);
return make_node(d, filter_delay, delay_free);
}
void filter_fwd_delay(delay *d, soundbyte *buf, int frames)
{
for (int i = 0; i < frames*CHANNELS; i++) {
ringpush(d->ring, buf[i]);
buf[i] += ringshift(d->ring)*d->decay;
}
}
dsp_node *dsp_fwd_delay(double sec, double decay)
{
delay *d = malloc(sizeof(*d));
d-> ms_delay = sec;
d->decay = decay;
d->ring = NULL;
d->ring = ringnew(d->ring, sec*CHANNELS*SAMPLERATE*2);
ringheader(d->ring)->write += CHANNELS*SAMPLERATE*sec;
YughWarn("FWD DELAY");
return make_node(d, filter_fwd_delay, delay_free);
}
/* Get decay constant for a given pole */
@ -407,7 +507,7 @@ dsp_node *dsp_adsr(unsigned int atk, unsigned int dec, unsigned int sus, unsigne
adsr->rls = rls + adsr->sus;
adsr->rls_t = tau2pole(rls / 3000.f);
return make_node(adsr, dsp_adsr_fillbuf);
return make_node(adsr, dsp_adsr_fillbuf, NULL);
}
void filter_noise_gate(float *floor, soundbyte *out, int frames)
@ -419,7 +519,7 @@ dsp_node *dsp_noise_gate(float floor)
{
float *v = malloc(sizeof(float));
*v = floor;
return make_node(v, filter_noise_gate);
return make_node(v, filter_noise_gate, NULL);
}
void filter_limiter(float *ceil, soundbyte *out, int n)
@ -431,7 +531,7 @@ dsp_node *dsp_limiter(float ceil)
{
float *v = malloc(sizeof(float));
*v = ceil;
return make_node(v, filter_limiter);
return make_node(v, filter_limiter, NULL);
}
void dsp_compressor_fillbuf(struct dsp_compressor *comp, soundbyte *out, int n)
@ -470,22 +570,21 @@ dsp_node *dsp_compressor()
struct dsp_compressor *c = malloc(sizeof(*c));
*c = new;
return make_node(c, dsp_compressor_fillbuf);
return make_node(c, dsp_compressor_fillbuf, NULL);
}
/* Assumes 2 channels in a frame */
void pan_frames(soundbyte *out, float deg, int frames)
{
if (deg == 0.f) return;
if (deg < -100) deg = -100.f;
else if (deg > 100) deg = 100.f;
if (deg < -1) deg = -1.f;
else if (deg > 1) deg = 1.f;
float db1, db2;
float pct = deg / 100.f;
if (deg > 0) {
db1 = pct2db(1 - pct);
db2 = pct2db(pct);
db1 = pct2db(1 - deg);
db2 = pct2db(deg);
for (int i = 0; i < frames; i++) {
soundbyte L = out[i*2];
soundbyte R = out[i*2+1];
@ -493,8 +592,8 @@ void pan_frames(soundbyte *out, float deg, int frames)
out[i*2+1] = (R + fgain(L, db2))/2;
}
} else {
db1 = pct2db(1 + pct);
db2 = pct2db(-1*pct);
db1 = pct2db(1 + deg);
db2 = pct2db(-1*deg);
for (int i = 0; i < frames; i++) {
soundbyte L = out[i*2];
soundbyte R = out[i*2+1];
@ -544,5 +643,5 @@ dsp_node *dsp_bitcrush(float sr, float res)
struct bitcrush *b = malloc(sizeof(*b));
b->sr = sr;
b->depth = res;
return make_node(b, filter_bitcrush);
return make_node(b, filter_bitcrush, NULL);
}

View file

@ -8,6 +8,7 @@
#include "sound.h"
#include "cbuf.h"
#include "script.h"
#include "iir.h"
/* a DSP node, when processed, sums its inputs, and stores the result of proc in its cache */
typedef struct dsp_node {
@ -28,10 +29,11 @@ void dsp_init();
/* Get the output of a node */
soundbyte *dsp_node_out(dsp_node *node);
void dsp_node_run(dsp_node *node);
dsp_node *make_node(void *data, void (*proc)(void *data, soundbyte *out, int samples));
dsp_node *make_node(void *data, void (*proc)(void *data, soundbyte *out, int samples), void (*fr)(void *data));
void plugin_node(dsp_node *from, dsp_node *to);
void unplug_node(dsp_node *node);
void node_free(dsp_node *node);
void filter_iir(struct dsp_iir *iir, soundbyte *buffer, int frames);
void scale_soundbytes(soundbyte *a, float scale, int frames);
void sum_soundbytes(soundbyte *a, soundbyte *b, int frames);
@ -71,6 +73,8 @@ typedef struct {
} delay;
dsp_node *dsp_delay(double sec, double decay);
dsp_node *dsp_fwd_delay(double sec, double decay);
dsp_node *dsp_pitchshift(float octaves);
struct dsp_compressor {
double ratio;
@ -91,6 +95,7 @@ struct phasor phasor_make(unsigned int sr, float freq);
dsp_node *dsp_whitenoise();
dsp_node *dsp_pinknoise();
dsp_node *dsp_rednoise();
float sin_phasor(float p);
float square_phasor(float p);

View file

@ -40,6 +40,7 @@ struct dsp_iir sp_hp(double fcf);
double chevy_pct_to_e(double pct);
struct dsp_iir make_iir(int order);
struct dsp_iir bqlp_dcof(double fcf, float Q);
struct dsp_iir bqhp_dcof(double fcf, float Q);
struct dsp_iir bqbpq_dcof(double fcf, float Q);

View file

@ -74,6 +74,13 @@ tsf *make_soundfont(const char *path)
return sf;
}
void dsp_midi_free(struct dsp_midi_song *ms)
{
free(ms->midi);
tsf_close(ms->sf);
free(ms);
}
dsp_node *dsp_midi(const char *midi, tsf *sf)
{
long rawlen;
@ -82,7 +89,8 @@ dsp_node *dsp_midi(const char *midi, tsf *sf)
ms->time = 0.0;
ms->midi = tml_load_memory(raw, rawlen);
ms->sf = tsf_copy(sf);
return make_node(ms, dsp_midi_fillbuf);
free(midi);
return make_node(ms, dsp_midi_fillbuf, dsp_midi_free);
}
void play_song(const char *midi, const char *sf)

View file

@ -0,0 +1,317 @@
/****************************************************************************
*
* NAME: smbPitchShift.c
* VERSION: 1.2
* HOME URL: http://blogs.zynaptiq.com/bernsee
* KNOWN BUGS: none
*
* SYNOPSIS: Routine for doing pitch shifting while maintaining
* duration using the Short Time Fourier Transform.
*
* DESCRIPTION: The routine takes a pitchShift factor value which is between 0.5
* (one octave down) and 2. (one octave up). A value of exactly 1 does not change
* the pitch. numSampsToProcess tells the routine how many samples in indata[0...
* numSampsToProcess-1] should be pitch shifted and moved to outdata[0 ...
* numSampsToProcess-1]. The two buffers can be identical (ie. it can process the
* data in-place). fftFrameSize defines the FFT frame size used for the
* processing. Typical values are 1024, 2048 and 4096. It may be any value <=
* MAX_FRAME_LENGTH but it MUST be a power of 2. osamp is the STFT
* oversampling factor which also determines the overlap between adjacent STFT
* frames. It should at least be 4 for moderate scaling ratios. A value of 32 is
* recommended for best quality. sampleRate takes the sample rate for the signal
* in unit Hz, ie. 44100 for 44.1 kHz audio. The data passed to the routine in
* indata[] should be in the range [-1.0, 1.0), which is also the output range
* for the data, make sure you scale the data accordingly (for 16bit signed integers
* you would have to divide (and multiply) by 32768).
*
* COPYRIGHT 1999-2015 Stephan M. Bernsee <s.bernsee [AT] zynaptiq [DOT] com>
*
* The Wide Open License (WOL)
*
* Permission to use, copy, modify, distribute and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice and this license appear in all source copies.
* THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF
* ANY KIND. See http://www.dspguru.com/wol.htm for more information.
*
*****************************************************************************/
#include <string.h>
#include <math.h>
#include <stdio.h>
#define M_PI 3.14159265358979323846
#define MAX_FRAME_LENGTH 8192
#define false 0
#define true 1
void smbFft(float *fftBuffer, long fftFrameSize, long sign);
double smbAtan2(double x, double y);
// -----------------------------------------------------------------------------------------------------------------
void smbPitchShift(float pitchShift, long numSampsToProcess, long fftFrameSize, long osamp, float sampleRate, float *indata, float *outdata)
/*
Routine smbPitchShift(). See top of file for explanation
Purpose: doing pitch shifting while maintaining duration using the Short
Time Fourier Transform.
Author: (c)1999-2015 Stephan M. Bernsee <s.bernsee [AT] zynaptiq [DOT] com>
*/
{
static float gInFIFO[MAX_FRAME_LENGTH];
static float gOutFIFO[MAX_FRAME_LENGTH];
static float gFFTworksp[2*MAX_FRAME_LENGTH];
static float gLastPhase[MAX_FRAME_LENGTH/2+1];
static float gSumPhase[MAX_FRAME_LENGTH/2+1];
static float gOutputAccum[2*MAX_FRAME_LENGTH];
static float gAnaFreq[MAX_FRAME_LENGTH];
static float gAnaMagn[MAX_FRAME_LENGTH];
static float gSynFreq[MAX_FRAME_LENGTH];
static float gSynMagn[MAX_FRAME_LENGTH];
static long gRover = false, gInit = false;
double magn, phase, tmp, window, real, imag;
double freqPerBin, expct;
long i,k, qpd, index, inFifoLatency, stepSize, fftFrameSize2;
/* set up some handy variables */
fftFrameSize2 = fftFrameSize/2;
stepSize = fftFrameSize/osamp;
freqPerBin = sampleRate/(double)fftFrameSize;
expct = 2.*M_PI*(double)stepSize/(double)fftFrameSize;
inFifoLatency = fftFrameSize-stepSize;
if (gRover == false) gRover = inFifoLatency;
/* initialize our static arrays */
if (gInit == false) {
memset(gInFIFO, 0, MAX_FRAME_LENGTH*sizeof(float));
memset(gOutFIFO, 0, MAX_FRAME_LENGTH*sizeof(float));
memset(gFFTworksp, 0, 2*MAX_FRAME_LENGTH*sizeof(float));
memset(gLastPhase, 0, (MAX_FRAME_LENGTH/2+1)*sizeof(float));
memset(gSumPhase, 0, (MAX_FRAME_LENGTH/2+1)*sizeof(float));
memset(gOutputAccum, 0, 2*MAX_FRAME_LENGTH*sizeof(float));
memset(gAnaFreq, 0, MAX_FRAME_LENGTH*sizeof(float));
memset(gAnaMagn, 0, MAX_FRAME_LENGTH*sizeof(float));
gInit = true;
}
/* main processing loop */
for (i = 0; i < numSampsToProcess; i++){
/* As long as we have not yet collected enough data just read in */
gInFIFO[gRover] = indata[i];
outdata[i] = gOutFIFO[gRover-inFifoLatency];
gRover++;
/* now we have enough data for processing */
if (gRover >= fftFrameSize) {
gRover = inFifoLatency;
/* do windowing and re,im interleave */
for (k = 0; k < fftFrameSize;k++) {
window = -.5*cos(2.*M_PI*(double)k/(double)fftFrameSize)+.5;
gFFTworksp[2*k] = gInFIFO[k] * window;
gFFTworksp[2*k+1] = 0.;
}
/* ***************** ANALYSIS ******************* */
/* do transform */
smbFft(gFFTworksp, fftFrameSize, -1);
/* this is the analysis step */
for (k = 0; k <= fftFrameSize2; k++) {
/* de-interlace FFT buffer */
real = gFFTworksp[2*k];
imag = gFFTworksp[2*k+1];
/* compute magnitude and phase */
magn = 2.*sqrt(real*real + imag*imag);
phase = atan2(imag,real);
/* compute phase difference */
tmp = phase - gLastPhase[k];
gLastPhase[k] = phase;
/* subtract expected phase difference */
tmp -= (double)k*expct;
/* map delta phase into +/- Pi interval */
qpd = tmp/M_PI;
if (qpd >= 0) qpd += qpd&1;
else qpd -= qpd&1;
tmp -= M_PI*(double)qpd;
/* get deviation from bin frequency from the +/- Pi interval */
tmp = osamp*tmp/(2.*M_PI);
/* compute the k-th partials' true frequency */
tmp = (double)k*freqPerBin + tmp*freqPerBin;
/* store magnitude and true frequency in analysis arrays */
gAnaMagn[k] = magn;
gAnaFreq[k] = tmp;
}
/* ***************** PROCESSING ******************* */
/* this does the actual pitch shifting */
memset(gSynMagn, 0, fftFrameSize*sizeof(float));
memset(gSynFreq, 0, fftFrameSize*sizeof(float));
for (k = 0; k <= fftFrameSize2; k++) {
index = k*pitchShift;
if (index <= fftFrameSize2) {
gSynMagn[index] += gAnaMagn[k];
gSynFreq[index] = gAnaFreq[k] * pitchShift;
}
}
/* ***************** SYNTHESIS ******************* */
/* this is the synthesis step */
for (k = 0; k <= fftFrameSize2; k++) {
/* get magnitude and true frequency from synthesis arrays */
magn = gSynMagn[k];
tmp = gSynFreq[k];
/* subtract bin mid frequency */
tmp -= (double)k*freqPerBin;
/* get bin deviation from freq deviation */
tmp /= freqPerBin;
/* take osamp into account */
tmp = 2.*M_PI*tmp/osamp;
/* add the overlap phase advance back in */
tmp += (double)k*expct;
/* accumulate delta phase to get bin phase */
gSumPhase[k] += tmp;
phase = gSumPhase[k];
/* get real and imag part and re-interleave */
gFFTworksp[2*k] = magn*cos(phase);
gFFTworksp[2*k+1] = magn*sin(phase);
}
/* zero negative frequencies */
for (k = fftFrameSize+2; k < 2*fftFrameSize; k++) gFFTworksp[k] = 0.;
/* do inverse transform */
smbFft(gFFTworksp, fftFrameSize, 1);
/* do windowing and add to output accumulator */
for(k=0; k < fftFrameSize; k++) {
window = -.5*cos(2.*M_PI*(double)k/(double)fftFrameSize)+.5;
gOutputAccum[k] += 2.*window*gFFTworksp[2*k]/(fftFrameSize2*osamp);
}
for (k = 0; k < stepSize; k++) gOutFIFO[k] = gOutputAccum[k];
/* shift accumulator */
memmove(gOutputAccum, gOutputAccum+stepSize, fftFrameSize*sizeof(float));
/* move input FIFO */
for (k = 0; k < inFifoLatency; k++) gInFIFO[k] = gInFIFO[k+stepSize];
}
}
}
// -----------------------------------------------------------------------------------------------------------------
void smbFft(float *fftBuffer, long fftFrameSize, long sign)
/*
FFT routine, (C)1996 S.M.Bernsee. Sign = -1 is FFT, 1 is iFFT (inverse)
Fills fftBuffer[0...2*fftFrameSize-1] with the Fourier transform of the
time domain data in fftBuffer[0...2*fftFrameSize-1]. The FFT array takes
and returns the cosine and sine parts in an interleaved manner, ie.
fftBuffer[0] = cosPart[0], fftBuffer[1] = sinPart[0], asf. fftFrameSize
must be a power of 2. It expects a complex input signal (see footnote 2),
ie. when working with 'common' audio signals our input signal has to be
passed as {in[0],0.,in[1],0.,in[2],0.,...} asf. In that case, the transform
of the frequencies of interest is in fftBuffer[0...fftFrameSize].
*/
{
float wr, wi, arg, *p1, *p2, temp;
float tr, ti, ur, ui, *p1r, *p1i, *p2r, *p2i;
long i, bitm, j, le, le2, k;
for (i = 2; i < 2*fftFrameSize-2; i += 2) {
for (bitm = 2, j = 0; bitm < 2*fftFrameSize; bitm <<= 1) {
if (i & bitm) j++;
j <<= 1;
}
if (i < j) {
p1 = fftBuffer+i; p2 = fftBuffer+j;
temp = *p1; *(p1++) = *p2;
*(p2++) = temp; temp = *p1;
*p1 = *p2; *p2 = temp;
}
}
for (k = 0, le = 2; k < (long)(log(fftFrameSize)/log(2.)+.5); k++) {
le <<= 1;
le2 = le>>1;
ur = 1.0;
ui = 0.0;
arg = M_PI / (le2>>1);
wr = cos(arg);
wi = sign*sin(arg);
for (j = 0; j < le2; j += 2) {
p1r = fftBuffer+j; p1i = p1r+1;
p2r = p1r+le2; p2i = p2r+1;
for (i = j; i < 2*fftFrameSize; i += le) {
tr = *p2r * ur - *p2i * ui;
ti = *p2r * ui + *p2i * ur;
*p2r = *p1r - tr; *p2i = *p1i - ti;
*p1r += tr; *p1i += ti;
p1r += le; p1i += le;
p2r += le; p2i += le;
}
tr = ur*wr - ui*wi;
ui = ur*wi + ui*wr;
ur = tr;
}
}
}
// -----------------------------------------------------------------------------------------------------------------
/*
12/12/02, smb
PLEASE NOTE:
There have been some reports on domain errors when the atan2() function was used
as in the above code. Usually, a domain error should not interrupt the program flow
(maybe except in Debug mode) but rather be handled "silently" and a global variable
should be set according to this error. However, on some occasions people ran into
this kind of scenario, so a replacement atan2() function is provided here.
If you are experiencing domain errors and your program stops, simply replace all
instances of atan2() with calls to the smbAtan2() function below.
*/
double smbAtan2(double x, double y)
{
double signx;
if (x > 0.) signx = 1.;
else signx = -1.;
if (x == 0.) return 0.;
if (y == 0.) return signx * M_PI / 2.;
return atan2(x, y);
}
// -----------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------

View file

@ -121,7 +121,7 @@ dsp_node *dsp_mod(const char *path)
void *data = slurp_file(path, &modsize);
pocketmod_context *mod = malloc(sizeof(*mod));
pocketmod_init(mod, data, modsize, SAMPLERATE);
return make_node(mod, filter_mod);
return make_node(mod, filter_mod, NULL);
}
void sound_init() {
@ -241,13 +241,19 @@ void sound_fillbuf(struct sound *s, soundbyte *buf, int n) {
frames = n;
else
end = 1;
if (s->timescale != 1) {
src_callback_read(s->src, s->timescale, frames, buf);
return;
}
soundbyte *in = s->data->data;
for (int i = 0; i < frames; i++) {
for (int j = 0; j < CHANNELS; j++)
buf[i * CHANNELS + j] = in[s->frame*CHANNELS + j];
s->frame++;
s->frame++;
}
if(end) {
@ -260,18 +266,28 @@ void sound_fillbuf(struct sound *s, soundbyte *buf, int n) {
void free_source(struct sound *s)
{
JS_FreeValue(js, s->hook);
src_delete(s->src);
free(s);
}
static long *src_cb(struct sound *s, float **data)
{
long needed = BUF_FRAMES/s->timescale;
*data = s->data->data+s->frame;
s->frame += needed;
return needed;
}
struct dsp_node *dsp_source(char *path)
{
struct sound *self = malloc(sizeof(*self));
self->frame = 0;
self->data = make_sound(path);
self->loop = false;
self->src = src_callback_new(src_cb, SRC_SINC_MEDIUM_QUALITY, 2, NULL, self);
self->timescale = 1;
self->hook = JS_UNDEFINED;
dsp_node *n = make_node(self, sound_fillbuf);
n->data_free = free_source;
dsp_node *n = make_node(self, sound_fillbuf, free_source);
return n;
}

View file

@ -2,6 +2,7 @@
#define SOUND_H
#include "script.h"
#include "samplerate.h"
typedef float soundbyte;
@ -12,6 +13,8 @@ typedef struct sound {
unsigned int frame; /* Pointing to the current frame on the wav */
struct wav *data;
int loop;
float timescale;
SRC_STATE *src;
JSValue hook;
} sound;