🍻 Fixed-Time Motion integration (#25719)

This commit is contained in:
Scott Lahteine 2023-06-22 02:54:21 -05:00 committed by GitHub
parent 2dc76689ea
commit 8c9172cf5d
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
11 changed files with 608 additions and 604 deletions

View file

@ -44,17 +44,19 @@ template <class L, class R> struct IF<true, L, R> { typedef L type; };
#define NUM_AXIS_ARRAY_1(V) { NUM_AXIS_LIST_1(V) }
#define NUM_AXIS_ARGS(T) NUM_AXIS_LIST(T x, T y, T z, T i, T j, T k, T u, T v, T w)
#define NUM_AXIS_ELEM(O) NUM_AXIS_LIST(O.x, O.y, O.z, O.i, O.j, O.k, O.u, O.v, O.w)
#define NUM_AXIS_DEFS(T,V) NUM_AXIS_LIST(T x=V, T y=V, T z=V, T i=V, T j=V, T k=V, T u=V, T v=V, T w=V)
#define NUM_AXIS_DECL(T,V) NUM_AXIS_LIST(T x=V, T y=V, T z=V, T i=V, T j=V, T k=V, T u=V, T v=V, T w=V)
#define MAIN_AXIS_NAMES NUM_AXIS_LIST(X, Y, Z, I, J, K, U, V, W)
#define STR_AXES_MAIN NUM_AXIS_GANG("X", "Y", "Z", STR_I, STR_J, STR_K, STR_U, STR_V, STR_W)
#if NUM_AXES
#define NUM_AXES_SEP ,
#define MAIN_AXIS_MAP(F) MAP(F, MAIN_AXIS_NAMES)
#define OPTARGS_NUM(T) , NUM_AXIS_ARGS(T)
#define OPTARGS_LOGICAL(T) , LOGICAL_AXIS_ARGS(T)
#else
#define NUM_AXES_SEP
#define MAIN_AXIS_MAP(F)
#define OPTARGS_NUM(T)
#define OPTARGS_LOGICAL(T)
#endif
@ -531,8 +533,8 @@ template<typename T>
struct XYZval {
union {
#if NUM_AXES
struct { T NUM_AXIS_ARGS(); };
struct { T NUM_AXIS_LIST(a, b, c, _i, _j, _k, _u, _v, _w); };
struct { NUM_AXIS_CODE(T x, T y, T z, T i, T j, T k, T u, T v, T w); };
struct { NUM_AXIS_CODE(T a, T b, T c, T _i, T _j, T _k, T _u, T _v, T _w); };
#endif
T pos[NUM_AXES];
};
@ -820,6 +822,101 @@ struct XYZEval {
#include <string.h> // for memset
/**
 * @brief Structure-of-arrays storage: one T[SIZE] array per linear axis.
 *
 * Each axis has a whole array of values, addressable by axis field name
 * (x, y, z, i, ...), by stepper alias (a, b, c, _i, ...), or through the
 * flat 'data' member. All three views alias the same storage via the union.
 */
template<typename T, int SIZE>
struct XYZarray {
  typedef T el[SIZE];
  union {
    // NOTE(review): sized LOGICAL_AXES though only NUM_AXES named members exist — confirm
    el data[LOGICAL_AXES];
    // Axis-name / stepper-alias views. These members must be arrays ('el'),
    // not scalars ('T'), so the x[n], y[n], ... indexing below is well-formed.
    struct { NUM_AXIS_CODE(el x, el y, el z, el i, el j, el k, el u, el v, el w); };
    struct { NUM_AXIS_CODE(el a, el b, el c, el _i, el _j, el _k, el _u, el _v, el _w); };
  };
  // Zero out every element of every axis array
  FI void reset() { ZERO(data); }
  // Set slot 'n' of the X/Y arrays from an XYval; other axes untouched
  FI void set(const int n, const XYval<T> p) { NUM_AXIS_CODE(x[n]=p.x, y[n]=p.y,,,,,,,); }
  // Set slot 'n' of all axis arrays from an XYZval
  FI void set(const int n, const XYZval<T> p) { NUM_AXIS_CODE(x[n]=p.x, y[n]=p.y, z[n]=p.z, i[n]=p.i, j[n]=p.j, k[n]=p.k, u[n]=p.u, v[n]=p.v, w[n]=p.w ); }
  // Set slot 'n' of all axis arrays from an XYZEval (the E component is ignored here)
  FI void set(const int n, const XYZEval<T> p) { NUM_AXIS_CODE(x[n]=p.x, y[n]=p.y, z[n]=p.z, i[n]=p.i, j[n]=p.j, k[n]=p.k, u[n]=p.u, v[n]=p.v, w[n]=p.w ); }
  // Setter for all individual args (writes through the a, b, c... aliases)
  FI void set(const int n OPTARGS_NUM(const T)) { NUM_AXIS_CODE(a[n] = x, b[n] = y, c[n] = z, _i[n] = i, _j[n] = j, _k[n] = k, _u[n] = u, _v[n] = v, _w[n] = w); }
  // Setters with fewer elements leave the rest untouched
  #if HAS_Y_AXIS
    FI void set(const int n, const T px) { x[n] = px; }
  #endif
  #if HAS_Z_AXIS
    FI void set(const int n, const T px, const T py) { x[n] = px; y[n] = py; }
  #endif
  #if HAS_I_AXIS
    FI void set(const int n, const T px, const T py, const T pz) { x[n] = px; y[n] = py; z[n] = pz; }
  #endif
  #if HAS_J_AXIS
    FI void set(const int n, const T px, const T py, const T pz, const T pi) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; }
  #endif
  #if HAS_K_AXIS
    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; }
  #endif
  #if HAS_U_AXIS
    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; }
  #endif
  #if HAS_V_AXIS
    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk, const T pu) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; u[n] = pu; }
  #endif
  #if HAS_W_AXIS
    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk, const T pu, const T pv) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; u[n] = pu; v[n] = pv; }
  #endif
  // Gather slot 'n' of every axis array into a single XYZval
  FI XYZval<T> operator[](const int n) const { return XYZval<T>(NUM_AXIS_ARRAY(x[n], y[n], z[n], i[n], j[n], k[n], u[n], v[n], w[n])); }
};
/**
 * @brief Structure-of-arrays storage like XYZarray, plus an extruder (E) axis.
 *
 * One T[SIZE] array per logical axis, addressable by axis field name
 * (e, x, y, z, ...), by stepper alias (_e, a, b, c, ...), or through the
 * flat 'data' member. All views alias the same storage via the union.
 */
template<typename T, int SIZE>
struct XYZEarray {
  typedef T el[SIZE];
  union {
    el data[LOGICAL_AXES];
    // 'el m, n, ...' declares every listed member as T[SIZE] via the typedef
    struct { el LOGICAL_AXIS_ARGS(); };
    struct { el LOGICAL_AXIS_LIST(_e, a, b, c, _i, _j, _k, _u, _v, _w); };
  };
  // Zero out every element of every axis array
  FI void reset() { ZERO(data); }
  // Set slot 'n' of the X/Y arrays from an XYval; other axes untouched
  FI void set(const int n, const XYval<T> p) { NUM_AXIS_CODE(x[n]=p.x, y[n]=p.y,,,,,,,); }
  // Set slot 'n' of the linear axis arrays from an XYZval (E untouched)
  FI void set(const int n, const XYZval<T> p) { NUM_AXIS_CODE(x[n]=p.x, y[n]=p.y, z[n]=p.z, i[n]=p.i, j[n]=p.j, k[n]=p.k, u[n]=p.u, v[n]=p.v, w[n]=p.w ); }
  // Set slot 'n' of all logical axis arrays (including E) from an XYZEval
  FI void set(const int n, const XYZEval<T> p) { LOGICAL_AXIS_CODE(e[n]=p.e, x[n]=p.x, y[n]=p.y, z[n]=p.z, i[n]=p.i, j[n]=p.j, k[n]=p.k, u[n]=p.u, v[n]=p.v, w[n]=p.w ); }
  // Setter for all individual linear-axis args (writes through the aliases)
  FI void set(const int n OPTARGS_NUM(const T)) { NUM_AXIS_CODE(a[n] = x, b[n] = y, c[n] = z, _i[n] = i, _j[n] = j, _k[n] = k, _u[n] = u, _v[n] = v, _w[n] = w); }
  #if LOGICAL_AXES > NUM_AXES
    // Setter including the E value, only when an extruder axis exists
    FI void set(const int n, LOGICAL_AXIS_ARGS(const T)) { LOGICAL_AXIS_CODE(_e[n] = e, a[n] = x, b[n] = y, c[n] = z, _i[n] = i, _j[n] = j, _k[n] = k, _u[n] = u, _v[n] = v, _w[n] = w); }
  #endif
  // Setters with fewer elements leave the rest untouched
  #if HAS_Y_AXIS
    FI void set(const int n, const T px) { x[n] = px; }
  #endif
  #if HAS_Z_AXIS
    FI void set(const int n, const T px, const T py) { x[n] = px; y[n] = py; }
  #endif
  #if HAS_I_AXIS
    FI void set(const int n, const T px, const T py, const T pz) { x[n] = px; y[n] = py; z[n] = pz; }
  #endif
  #if HAS_J_AXIS
    FI void set(const int n, const T px, const T py, const T pz, const T pi) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; }
  #endif
  #if HAS_K_AXIS
    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; }
  #endif
  #if HAS_U_AXIS
    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; }
  #endif
  #if HAS_V_AXIS
    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk, const T pu) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; u[n] = pu; }
  #endif
  #if HAS_W_AXIS
    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk, const T pu, const T pv) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; u[n] = pu; v[n] = pv; }
  #endif
  // Gather slot 'n' of every logical axis array into a single XYZEval.
  // Fix: construct an XYZEval (not XYZval) to match the declared return type
  // and the LOGICAL_AXIS_ARRAY initializer that includes the E component.
  FI XYZEval<T> operator[](const int n) const { return XYZEval<T>(LOGICAL_AXIS_ARRAY(e[n], x[n], y[n], z[n], i[n], j[n], k[n], u[n], v[n], w[n])); }
};
class AxisBits;
class AxisBits {
@ -972,6 +1069,8 @@ public:
#undef MSET
FI bool toggle(const AxisEnum n) { TBI(bits, n); return TEST(bits, n); }
FI void bset(const AxisEnum n) { SBI(bits, n); }
FI void bclr(const AxisEnum n) { CBI(bits, n); }
// Accessor via an AxisEnum (or any integer) [index]
FI bool operator[](const int n) const { return TEST(bits, n); }

View file

@ -108,7 +108,7 @@ class Mixer {
}
// Used when dealing with blocks
FORCE_INLINE static void populate_block(mixer_comp_t b_color[MIXING_STEPPERS]) {
FORCE_INLINE static void populate_block(mixer_comp_t (&b_color)[MIXING_STEPPERS]) {
#if ENABLED(GRADIENT_MIX)
if (gradient.enabled) {
MIXER_STEPPER_LOOP(i) b_color[i] = gradient.color[i];
@ -118,7 +118,7 @@ class Mixer {
MIXER_STEPPER_LOOP(i) b_color[i] = color[selected_vtool][i];
}
FORCE_INLINE static void stepper_setup(mixer_comp_t b_color[MIXING_STEPPERS]) {
FORCE_INLINE static void stepper_setup(mixer_comp_t (&b_color)[MIXING_STEPPERS]) {
MIXER_STEPPER_LOOP(i) s_color[i] = b_color[i];
}
@ -231,13 +231,7 @@ class Mixer {
for (;;) {
if (--runner < 0) runner = MIXING_STEPPERS - 1;
accu[runner] += s_color[runner];
if (
#ifdef MIXER_ACCU_SIGNED
accu[runner] < 0
#else
accu[runner] & COLOR_A_MASK
#endif
) {
if (TERN(MIXER_ACCU_SIGNED, accu[runner] < 0, accu[runner] & COLOR_A_MASK)) {
accu[runner] &= COLOR_MASK;
return runner;
}

View file

@ -153,49 +153,36 @@ void GcodeSuite::M493_report(const bool forReplay/*=true*/) {
void GcodeSuite::M493() {
struct { bool update_n:1, update_a:1, reset_ft:1, report_h:1; } flag = { false };
if (!parser.seen_any()) flag.report_h = true;
if (!parser.seen_any())
flag.report_h = true;
else
planner.synchronize();
// Parse 'S' mode parameter.
if (parser.seenval('S')) {
const ftMotionMode_t oldmm = fxdTiCtrl.cfg.mode,
newmm = (ftMotionMode_t)parser.value_byte();
switch (newmm) {
#if HAS_X_AXIS
case ftMotionMode_ZV:
case ftMotionMode_ZVD:
case ftMotionMode_2HEI:
case ftMotionMode_3HEI:
case ftMotionMode_MZV:
//case ftMotionMode_ULENDO_FBS:
//case ftMotionMode_DISCTF:
#endif
case ftMotionMode_DISABLED:
case ftMotionMode_ENABLED:
fxdTiCtrl.cfg.mode = newmm;
flag.report_h = true;
break;
default:
SERIAL_ECHOLNPGM("?Invalid control mode [M] value.");
return;
}
if (fxdTiCtrl.cfg.mode != oldmm) switch (newmm) {
default: break;
#if HAS_X_AXIS
//case ftMotionMode_ULENDO_FBS:
//case ftMotionMode_DISCTF:
// break;
case ftMotionMode_ZV:
case ftMotionMode_ZVD:
case ftMotionMode_EI:
case ftMotionMode_2HEI:
case ftMotionMode_3HEI:
case ftMotionMode_MZV:
flag.update_n = flag.update_a = true;
#endif
case ftMotionMode_ENABLED:
flag.reset_ft = true;
break;
if (newmm != oldmm) {
switch (newmm) {
default: SERIAL_ECHOLNPGM("?Invalid control mode [S] value."); return;
#if HAS_X_AXIS
case ftMotionMode_ZV:
case ftMotionMode_ZVD:
case ftMotionMode_2HEI:
case ftMotionMode_3HEI:
case ftMotionMode_MZV:
//case ftMotionMode_ULENDO_FBS:
//case ftMotionMode_DISCTF:
flag.update_n = flag.update_a = true;
#endif
case ftMotionMode_DISABLED:
case ftMotionMode_ENABLED:
fxdTiCtrl.cfg.mode = newmm;
flag.report_h = true;
if (oldmm == ftMotionMode_DISABLED) flag.reset_ft = true;
break;
}
}
}

View file

@ -4058,12 +4058,8 @@ static_assert(_PLUS_TEST(3), "DEFAULT_MAX_ACCELERATION values must be positive."
/**
* Fixed-Time Motion limitations
*/
#if ENABLED(FT_MOTION)
#if NUM_AXES > 3
#error "FT_MOTION is currently limited to machines with 3 linear axes."
#elif ENABLED(MIXING_EXTRUDER)
#error "FT_MOTION is incompatible with MIXING_EXTRUDER."
#endif
#if ALL(FT_MOTION, MIXING_EXTRUDER)
#error "FT_MOTION does not currently support MIXING_EXTRUDER."
#endif
// Multi-Stepping Limit

View file

@ -61,53 +61,25 @@ bool FxdTiCtrl::sts_stepperBusy = false; // The stepper buffer has item
// Private variables.
// NOTE: These are sized for Ulendo FBS use.
#if HAS_X_AXIS
float FxdTiCtrl::xd[2 * (FTM_BATCH_SIZE)], // = {0.0f} Storage for fixed-time-based trajectory.
FxdTiCtrl::xm[FTM_BATCH_SIZE]; // = {0.0f} Storage for modified fixed-time-based trajectory.
#endif
#if HAS_Y_AXIS
float FxdTiCtrl::yd[2 * (FTM_BATCH_SIZE)], FxdTiCtrl::ym[FTM_BATCH_SIZE];
#endif
#if HAS_Z_AXIS
float FxdTiCtrl::zd[2 * (FTM_BATCH_SIZE)], FxdTiCtrl::zm[FTM_BATCH_SIZE];
#endif
#if HAS_EXTRUDERS
float FxdTiCtrl::ed[2 * (FTM_BATCH_SIZE)], FxdTiCtrl::em[FTM_BATCH_SIZE];
#endif
xyze_trajectory_t FxdTiCtrl::traj; // = {0.0f} Storage for fixed-time-based trajectory.
xyze_trajectoryMod_t FxdTiCtrl::trajMod; // = {0.0f} Storage for modified fixed-time-based trajectory.
block_t* FxdTiCtrl::current_block_cpy = nullptr; // Pointer to current block being processed.
bool FxdTiCtrl::blockProcRdy = false, // Indicates a block is ready to be processed.
FxdTiCtrl::blockProcRdy_z1 = false, // Storage for the previous indicator.
FxdTiCtrl::blockProcDn = false; // Indicates current block is done being processed.
bool FxdTiCtrl::batchRdy = false; // Indicates a batch of the fixed time trajectory
// has been generated, is now available in the upper -
// half of xd, yd, zd, ed vectors, and is ready to be
// post processed, if applicable, then interpolated.
bool FxdTiCtrl::batchRdyForInterp = false; // Indicates the batch is done being post processed,
// if applicable, and is ready to be converted to step commands.
bool FxdTiCtrl::runoutEna = false; // True if runout of the block hasn't been done and is allowed.
block_t* FxdTiCtrl::current_block_cpy = nullptr; // Pointer to current block being processed.
bool FxdTiCtrl::blockProcRdy = false, // Indicates a block is ready to be processed.
FxdTiCtrl::blockProcRdy_z1 = false, // Storage for the previous indicator.
FxdTiCtrl::blockProcDn = false; // Indicates current block is done being processed.
bool FxdTiCtrl::batchRdy = false; // Indicates a batch of the fixed time trajectory
// has been generated, is now available in the upper -
// half of traj.x[], y, z ... e vectors, and is ready to be
// post processed, if applicable, then interpolated.
bool FxdTiCtrl::batchRdyForInterp = false; // Indicates the batch is done being post processed,
// if applicable, and is ready to be converted to step commands.
bool FxdTiCtrl::runoutEna = false; // True if runout of the block hasn't been done and is allowed.
// Trapezoid data variables.
#if HAS_X_AXIS
float FxdTiCtrl::x_startPosn, // (mm) Start position of block
FxdTiCtrl::x_endPosn_prevBlock = 0.0f, // (mm) Start position of block
FxdTiCtrl::x_Ratio; // (ratio) Axis move ratio of block
#endif
#if HAS_Y_AXIS
float FxdTiCtrl::y_startPosn,
FxdTiCtrl::y_endPosn_prevBlock = 0.0f,
FxdTiCtrl::y_Ratio;
#endif
#if HAS_Z_AXIS
float FxdTiCtrl::z_startPosn,
FxdTiCtrl::z_endPosn_prevBlock = 0.0f,
FxdTiCtrl::z_Ratio;
#endif
#if HAS_EXTRUDERS
float FxdTiCtrl::e_startPosn,
FxdTiCtrl::e_endPosn_prevBlock = 0.0f,
FxdTiCtrl::e_Ratio;
#endif
xyze_pos_t FxdTiCtrl::startPosn, // (mm) Start position of block
FxdTiCtrl::endPosn_prevBlock = { 0.0f }; // (mm) End position of previous block
xyze_float_t FxdTiCtrl::ratio; // (ratio) Axis move ratio of block
float FxdTiCtrl::accel_P, // Acceleration prime of block. [mm/sec/sec]
FxdTiCtrl::decel_P, // Deceleration prime of block. [mm/sec/sec]
FxdTiCtrl::F_P, // Feedrate prime of block. [mm/sec]
@ -127,22 +99,8 @@ uint32_t FxdTiCtrl::makeVector_idx = 0, // Index of fixed ti
FxdTiCtrl::makeVector_batchIdx = FTM_BATCH_SIZE; // Index of fixed time trajectory generation within the batch.
// Interpolation variables.
#if HAS_X_AXIS
int32_t FxdTiCtrl::x_steps = 0; // Step count accumulator.
stepDirState_t FxdTiCtrl::x_dirState = stepDirState_NOT_SET; // Memory of the currently set step direction of the axis.
#endif
#if HAS_Y_AXIS
int32_t FxdTiCtrl::y_steps = 0;
stepDirState_t FxdTiCtrl::y_dirState = stepDirState_NOT_SET;
#endif
#if HAS_Z_AXIS
int32_t FxdTiCtrl::z_steps = 0;
stepDirState_t FxdTiCtrl::z_dirState = stepDirState_NOT_SET;
#endif
#if HAS_EXTRUDERS
int32_t FxdTiCtrl::e_steps = 0;
stepDirState_t FxdTiCtrl::e_dirState = stepDirState_NOT_SET;
#endif
xyze_long_t FxdTiCtrl::steps = { 0 }; // Step count accumulator.
xyze_stepDir_t FxdTiCtrl::dirState = LOGICAL_AXIS_ARRAY_1(stepDirState_NOT_SET); // Memory of the currently set step direction of the axis.
uint32_t FxdTiCtrl::interpIdx = 0, // Index of current data point being interpolated.
FxdTiCtrl::interpIdx_z1 = 0; // Storage for the previously calculated index above.
@ -150,16 +108,13 @@ hal_timer_t FxdTiCtrl::nextStepTicks = FTM_MIN_TICKS; // Accumulator for the nex
// Shaping variables.
#if HAS_X_AXIS
uint32_t FxdTiCtrl::xy_zi_idx = 0, // Index of storage in the data point delay vectors.
FxdTiCtrl::xy_max_i = 0; // Vector length for the selected shaper.
float FxdTiCtrl::xd_zi[FTM_ZMAX] = { 0.0f }; // Data point delay vector.
float FxdTiCtrl::x_Ai[5]; // Shaping gain vector.
uint32_t FxdTiCtrl::x_Ni[5]; // Shaping time index vector.
#endif
#if HAS_Y_AXIS
float FxdTiCtrl::yd_zi[FTM_ZMAX] = { 0.0f };
float FxdTiCtrl::y_Ai[5];
uint32_t FxdTiCtrl::y_Ni[5];
FxdTiCtrl::shaping_t FxdTiCtrl::shaping = {
0, 0,
x:{ { 0.0f }, { 0.0f }, { 0 } }, // d_zi, Ai, Ni
#if HAS_Y_AXIS
y:{ { 0.0f }, { 0.0f }, { 0 } } // d_zi, Ai, Ni
#endif
};
#endif
#if HAS_EXTRUDERS
@ -188,14 +143,22 @@ void FxdTiCtrl::runoutBlock() {
if (runoutEna && !batchRdy) { // If the window is full already (block intervals was a multiple of
// the batch size), or runout is not enabled, no runout is needed.
// Fill out the trajectory window with the last position calculated.
if (makeVector_batchIdx > FTM_BATCH_SIZE) {
if (makeVector_batchIdx > FTM_BATCH_SIZE)
for (uint32_t i = makeVector_batchIdx; i < 2 * (FTM_BATCH_SIZE); i++) {
xd[i] = xd[makeVector_batchIdx - 1];
TERN_(HAS_Y_AXIS, yd[i] = yd[makeVector_batchIdx - 1]);
TERN_(HAS_Y_AXIS, zd[i] = zd[makeVector_batchIdx - 1]);
TERN_(HAS_EXTRUDERS, ed[i] = ed[makeVector_batchIdx - 1]);
LOGICAL_AXIS_CODE(
traj.e[i] = traj.e[makeVector_batchIdx - 1],
traj.x[i] = traj.x[makeVector_batchIdx - 1],
traj.y[i] = traj.y[makeVector_batchIdx - 1],
traj.z[i] = traj.z[makeVector_batchIdx - 1],
traj.i[i] = traj.i[makeVector_batchIdx - 1],
traj.j[i] = traj.j[makeVector_batchIdx - 1],
traj.k[i] = traj.k[makeVector_batchIdx - 1],
traj.u[i] = traj.u[makeVector_batchIdx - 1],
traj.v[i] = traj.v[makeVector_batchIdx - 1],
traj.w[i] = traj.w[makeVector_batchIdx - 1]
);
}
}
makeVector_batchIdx = FTM_BATCH_SIZE;
batchRdy = true;
}
@ -234,23 +197,28 @@ void FxdTiCtrl::loop() {
// Call Ulendo FBS here.
memcpy(xm, &xd[FTM_BATCH_SIZE], sizeof(xm));
TERN_(HAS_Y_AXIS, memcpy(ym, &yd[FTM_BATCH_SIZE], sizeof(ym)));
// Copy the uncompensated vectors. (XY done, other axes uncompensated)
LOGICAL_AXIS_CODE(
memcpy(trajMod.e, &traj.e[FTM_BATCH_SIZE], sizeof(trajMod.e)),
memcpy(trajMod.x, &traj.x[FTM_BATCH_SIZE], sizeof(trajMod.x)),
memcpy(trajMod.y, &traj.y[FTM_BATCH_SIZE], sizeof(trajMod.y)),
memcpy(trajMod.z, &traj.z[FTM_BATCH_SIZE], sizeof(trajMod.z)),
memcpy(trajMod.i, &traj.i[FTM_BATCH_SIZE], sizeof(trajMod.i)),
memcpy(trajMod.j, &traj.j[FTM_BATCH_SIZE], sizeof(trajMod.j)),
memcpy(trajMod.k, &traj.k[FTM_BATCH_SIZE], sizeof(trajMod.k)),
memcpy(trajMod.u, &traj.u[FTM_BATCH_SIZE], sizeof(trajMod.u)),
memcpy(trajMod.v, &traj.v[FTM_BATCH_SIZE], sizeof(trajMod.v)),
memcpy(trajMod.w, &traj.w[FTM_BATCH_SIZE], sizeof(trajMod.w))
);
// Done compensating ...
// Shift the time series back in the window for (shaped) X and Y
TERN_(HAS_X_AXIS, memcpy(traj.x, &traj.x[FTM_BATCH_SIZE], sizeof(traj.x) / 2));
TERN_(HAS_Y_AXIS, memcpy(traj.y, &traj.y[FTM_BATCH_SIZE], sizeof(traj.y) / 2));
// Copy the uncompensated vectors.
TERN_(HAS_Z_AXIS, memcpy(zm, &zd[FTM_BATCH_SIZE], sizeof(zm)));
TERN_(HAS_EXTRUDERS, memcpy(em, &ed[FTM_BATCH_SIZE], sizeof(em)));
// Z...W and E Disabled! Uncompensated so the lower half is not used.
//TERN_(HAS_Z_AXIS, memcpy(&traj.z[0], &traj.z[FTM_BATCH_SIZE], sizeof(traj.z) / 2));
// Shift the time series back in the window.
memcpy(xd, &xd[FTM_BATCH_SIZE], sizeof(xd) / 2);
TERN_(HAS_Y_AXIS, memcpy(yd, &yd[FTM_BATCH_SIZE], sizeof(yd) / 2));
// Disabled by comment as these are uncompensated, the lower half is not used.
//TERN_(HAS_Z_AXIS, memcpy(zd, &zd[FTM_BATCH_SIZE], (sizeof(zd) / 2)));
//TERN_(HAS_EXTRUDERS, memcpy(ed, &ed[FTM_BATCH_SIZE], (sizeof(ed) / 2)));
// ... data is ready in xm, ym, zm, em.
// ... data is ready in trajMod.
batchRdyForInterp = true;
batchRdy = false; // Clear so that makeVector() may resume generating points.
@ -282,134 +250,119 @@ void FxdTiCtrl::loop() {
// Refresh the gains used by shaping functions.
// To be called on init or mode or zeta change.
void FxdTiCtrl::updateShapingA(const_float_t zeta/*=FTM_SHAPING_ZETA*/, const_float_t vtol/*=FTM_SHAPING_V_TOL*/) {
const float K = exp( -zeta * M_PI / sqrt(1.0f - sq(zeta)) ),
void FxdTiCtrl::Shaping::updateShapingA(const_float_t zeta/*=FTM_SHAPING_ZETA*/, const_float_t vtol/*=FTM_SHAPING_V_TOL*/) {
const float K = exp(-zeta * M_PI / sqrt(1.0f - sq(zeta))),
K2 = sq(K);
switch (cfg.mode) {
case ftMotionMode_ZV:
xy_max_i = 1U;
x_Ai[0] = 1.0f / (1.0f + K);
x_Ai[1] = x_Ai[0] * K;
max_i = 1U;
x.Ai[0] = 1.0f / (1.0f + K);
x.Ai[1] = x.Ai[0] * K;
break;
case ftMotionMode_ZVD:
xy_max_i = 2U;
x_Ai[0] = 1.0f / ( 1.0f + 2.0f * K + K2 );
x_Ai[1] = x_Ai[0] * 2.0f * K;
x_Ai[2] = x_Ai[0] * K2;
max_i = 2U;
x.Ai[0] = 1.0f / ( 1.0f + 2.0f * K + K2 );
x.Ai[1] = x.Ai[0] * 2.0f * K;
x.Ai[2] = x.Ai[0] * K2;
break;
case ftMotionMode_EI: {
xy_max_i = 2U;
x_Ai[0] = 0.25f * (1.0f + vtol);
x_Ai[1] = 0.50f * (1.0f - vtol) * K;
x_Ai[2] = x_Ai[0] * K2;
const float A_adj = 1.0f / (x_Ai[0] + x_Ai[1] + x_Ai[2]);
for (uint32_t i = 0U; i < 3U; i++) { x_Ai[i] *= A_adj; }
max_i = 2U;
x.Ai[0] = 0.25f * (1.0f + vtol);
x.Ai[1] = 0.50f * (1.0f - vtol) * K;
x.Ai[2] = x.Ai[0] * K2;
const float A_adj = 1.0f / (x.Ai[0] + x.Ai[1] + x.Ai[2]);
for (uint32_t i = 0U; i < 3U; i++) { x.Ai[i] *= A_adj; }
} break;
case ftMotionMode_2HEI: {
xy_max_i = 3U;
max_i = 3U;
const float vtol2 = sq(vtol);
const float X = pow(vtol2 * (sqrt(1.0f - vtol2) + 1.0f), 1.0f / 3.0f);
x_Ai[0] = ( 3.0f * sq(X) + 2.0f * X + 3.0f * vtol2 ) / (16.0f * X);
x_Ai[1] = ( 0.5f - x_Ai[0] ) * K;
x_Ai[2] = x_Ai[1] * K;
x_Ai[3] = x_Ai[0] * cu(K);
const float A_adj = 1.0f / (x_Ai[0] + x_Ai[1] + x_Ai[2] + x_Ai[3]);
for (uint32_t i = 0U; i < 4U; i++) { x_Ai[i] *= A_adj; }
x.Ai[0] = ( 3.0f * sq(X) + 2.0f * X + 3.0f * vtol2 ) / (16.0f * X);
x.Ai[1] = ( 0.5f - x.Ai[0] ) * K;
x.Ai[2] = x.Ai[1] * K;
x.Ai[3] = x.Ai[0] * cu(K);
const float A_adj = 1.0f / (x.Ai[0] + x.Ai[1] + x.Ai[2] + x.Ai[3]);
for (uint32_t i = 0U; i < 4U; i++) { x.Ai[i] *= A_adj; }
} break;
case ftMotionMode_3HEI: {
xy_max_i = 4U;
x_Ai[0] = 0.0625f * ( 1.0f + 3.0f * vtol + 2.0f * sqrt( 2.0f * ( vtol + 1.0f ) * vtol ) );
x_Ai[1] = 0.25f * ( 1.0f - vtol ) * K;
x_Ai[2] = ( 0.5f * ( 1.0f + vtol ) - 2.0f * x_Ai[0] ) * K2;
x_Ai[3] = x_Ai[1] * K2;
x_Ai[4] = x_Ai[0] * sq(K2);
const float A_adj = 1.0f / (x_Ai[0] + x_Ai[1] + x_Ai[2] + x_Ai[3] + x_Ai[4]);
for (uint32_t i = 0U; i < 5U; i++) { x_Ai[i] *= A_adj; }
max_i = 4U;
x.Ai[0] = 0.0625f * ( 1.0f + 3.0f * vtol + 2.0f * sqrt( 2.0f * ( vtol + 1.0f ) * vtol ) );
x.Ai[1] = 0.25f * ( 1.0f - vtol ) * K;
x.Ai[2] = ( 0.5f * ( 1.0f + vtol ) - 2.0f * x.Ai[0] ) * K2;
x.Ai[3] = x.Ai[1] * K2;
x.Ai[4] = x.Ai[0] * sq(K2);
const float A_adj = 1.0f / (x.Ai[0] + x.Ai[1] + x.Ai[2] + x.Ai[3] + x.Ai[4]);
for (uint32_t i = 0U; i < 5U; i++) { x.Ai[i] *= A_adj; }
} break;
case ftMotionMode_MZV: {
xy_max_i = 2U;
max_i = 2U;
const float B = 1.4142135623730950488016887242097f * K;
x_Ai[0] = 1.0f / (1.0f + B + K2);
x_Ai[1] = x_Ai[0] * B;
x_Ai[2] = x_Ai[0] * K2;
x.Ai[0] = 1.0f / (1.0f + B + K2);
x.Ai[1] = x.Ai[0] * B;
x.Ai[2] = x.Ai[0] * K2;
} break;
default:
for (uint32_t i = 0U; i < 5U; i++) x_Ai[i] = 0.0f;
xy_max_i = 0;
for (uint32_t i = 0U; i < 5U; i++) x.Ai[i] = 0.0f;
max_i = 0;
}
#if HAS_Y_AXIS
memcpy(y_Ai, x_Ai, sizeof(x_Ai)); // For now, zeta and vtol are shared across x and y.
memcpy(y.Ai, x.Ai, sizeof(x.Ai)); // For now, zeta and vtol are shared across x and y.
#endif
}
void FxdTiCtrl::updateShapingA(const_float_t zeta/*=FTM_SHAPING_ZETA*/, const_float_t vtol/*=FTM_SHAPING_V_TOL*/) {
shaping.updateShapingA(zeta, vtol);
}
// Refresh the indices used by shaping functions.
// To be called when frequencies change.
void FxdTiCtrl::updateShapingN(const_float_t xf OPTARG(HAS_Y_AXIS, const_float_t yf), const_float_t zeta/*=FTM_SHAPING_ZETA*/) {
void FxdTiCtrl::AxisShaping::updateShapingN(const_float_t f, const_float_t df) {
// Protections omitted for DBZ and for index exceeding array length.
const float df = sqrt(1.0f - sq(zeta));
switch (cfg.mode) {
case ftMotionMode_ZV:
x_Ni[1] = round((0.5f / xf / df) * (FTM_FS));
#if HAS_Y_AXIS
y_Ni[1] = round((0.5f / yf / df) * (FTM_FS));
#endif
Ni[1] = round((0.5f / f / df) * (FTM_FS));
break;
case ftMotionMode_ZVD:
case ftMotionMode_EI:
x_Ni[1] = round((0.5f / xf / df) * (FTM_FS));
x_Ni[2] = 2 * x_Ni[1];
#if HAS_Y_AXIS
y_Ni[1] = round((0.5f / yf / df) * (FTM_FS));
y_Ni[2] = 2 * y_Ni[1];
#endif
Ni[1] = round((0.5f / f / df) * (FTM_FS));
Ni[2] = Ni[1] + Ni[1];
break;
case ftMotionMode_2HEI:
x_Ni[1] = round((0.5f / xf / df) * (FTM_FS));
x_Ni[2] = 2 * x_Ni[1];
x_Ni[3] = 3 * x_Ni[1];
#if HAS_Y_AXIS
y_Ni[1] = round((0.5f / yf / df) * (FTM_FS));
y_Ni[2] = 2 * y_Ni[1];
y_Ni[3] = 3 * y_Ni[1];
#endif
Ni[1] = round((0.5f / f / df) * (FTM_FS));
Ni[2] = Ni[1] + Ni[1];
Ni[3] = Ni[2] + Ni[1];
break;
case ftMotionMode_3HEI:
x_Ni[1] = round((0.5f / xf / df) * (FTM_FS));
x_Ni[2] = 2 * x_Ni[1];
x_Ni[3] = 3 * x_Ni[1];
x_Ni[4] = 4 * x_Ni[1];
#if HAS_Y_AXIS
y_Ni[1] = round((0.5f / yf / df) * (FTM_FS));
y_Ni[2] = 2 * y_Ni[1];
y_Ni[3] = 3 * y_Ni[1];
y_Ni[4] = 4 * y_Ni[1];
#endif
Ni[1] = round((0.5f / f / df) * (FTM_FS));
Ni[2] = Ni[1] + Ni[1];
Ni[3] = Ni[2] + Ni[1];
Ni[4] = Ni[3] + Ni[1];
break;
case ftMotionMode_MZV:
x_Ni[1] = round((0.375f / xf / df) * (FTM_FS));
x_Ni[2] = 2 * x_Ni[1];
#if HAS_Y_AXIS
y_Ni[1] = round((0.375f / yf / df) * (FTM_FS));
y_Ni[2] = 2 * y_Ni[1];
#endif
Ni[1] = round((0.375f / f / df) * (FTM_FS));
Ni[2] = Ni[1] + Ni[1];
break;
default:
for (uint32_t i = 0U; i < 5U; i++) { x_Ni[i] = 0; TERN_(HAS_Y_AXIS, y_Ni[i] = 0); }
default: ZERO(Ni);
}
}
void FxdTiCtrl::updateShapingN(const_float_t xf OPTARG(HAS_Y_AXIS, const_float_t yf), const_float_t zeta/*=FTM_SHAPING_ZETA*/) {
const float df = sqrt(1.0f - sq(zeta));
shaping.x.updateShapingN(xf, df);
TERN_(HAS_Y_AXIS, shaping.y.updateShapingN(yf, df));
}
#endif // HAS_X_AXIS
// Reset all trajectory processing variables.
@ -417,42 +370,26 @@ void FxdTiCtrl::reset() {
stepperCmdBuff_produceIdx = stepperCmdBuff_consumeIdx = 0;
for (uint32_t i = 0U; i < (FTM_BATCH_SIZE); i++) { // Reset trajectory history
TERN_(HAS_X_AXIS, xd[i] = 0.0f);
TERN_(HAS_Y_AXIS, yd[i] = 0.0f);
TERN_(HAS_Z_AXIS, zd[i] = 0.0f);
TERN_(HAS_EXTRUDERS, ed[i] = 0.0f);
}
traj.reset(); // Reset trajectory history
blockProcRdy = blockProcRdy_z1 = blockProcDn = false;
batchRdy = batchRdyForInterp = false;
runoutEna = false;
TERN_(HAS_X_AXIS, x_endPosn_prevBlock = 0.0f);
TERN_(HAS_Y_AXIS, y_endPosn_prevBlock = 0.0f);
TERN_(HAS_Z_AXIS, z_endPosn_prevBlock = 0.0f);
TERN_(HAS_EXTRUDERS, e_endPosn_prevBlock = 0.0f);
endPosn_prevBlock.reset();
makeVector_idx = makeVector_idx_z1 = 0;
makeVector_batchIdx = FTM_BATCH_SIZE;
TERN_(HAS_X_AXIS, x_steps = 0);
TERN_(HAS_Y_AXIS, y_steps = 0);
TERN_(HAS_Z_AXIS, z_steps = 0);
TERN_(HAS_EXTRUDERS, e_steps = 0);
steps.reset();
interpIdx = interpIdx_z1 = 0;
TERN_(HAS_X_AXIS, x_dirState = stepDirState_NOT_SET);
TERN_(HAS_Y_AXIS, y_dirState = stepDirState_NOT_SET);
TERN_(HAS_Z_AXIS, z_dirState = stepDirState_NOT_SET);
TERN_(HAS_EXTRUDERS, e_dirState = stepDirState_NOT_SET);
dirState = LOGICAL_AXIS_ARRAY_1(stepDirState_NOT_SET);
nextStepTicks = FTM_MIN_TICKS;
#if HAS_X_AXIS
for (uint32_t i = 0U; i < (FTM_ZMAX); i++) { xd_zi[i] = 0.0f; TERN_(HAS_Y_AXIS, yd_zi[i] = 0.0f); }
xy_zi_idx = 0;
for (uint32_t i = 0U; i < (FTM_ZMAX); i++)
shaping.x.d_zi[i] = TERN_(HAS_Y_AXIS, shaping.y.d_zi[i] =) 0.0f;
shaping.zi_idx = 0;
#endif
TERN_(HAS_EXTRUDERS, e_raw_z1 = e_advanced_z1 = 0.0f);
@ -482,33 +419,34 @@ void FxdTiCtrl::loadBlockData(block_t * const current_block) {
const AxisBits direction = current_block->direction_bits;
#if HAS_X_AXIS
x_startPosn = x_endPosn_prevBlock;
float x_moveDist = current_block->steps.a / planner.settings.axis_steps_per_mm[X_AXIS];
if (!direction.x) x_moveDist *= -1.0f;
x_Ratio = x_moveDist * oneOverLength;
#endif
startPosn = endPosn_prevBlock;
xyze_pos_t moveDist = LOGICAL_AXIS_ARRAY(
current_block->steps.e / planner.settings.axis_steps_per_mm[E_AXIS_N(current_block->extruder)],
current_block->steps.x / planner.settings.axis_steps_per_mm[X_AXIS],
current_block->steps.y / planner.settings.axis_steps_per_mm[Y_AXIS],
current_block->steps.z / planner.settings.axis_steps_per_mm[Z_AXIS],
current_block->steps.i / planner.settings.axis_steps_per_mm[I_AXIS],
current_block->steps.j / planner.settings.axis_steps_per_mm[J_AXIS],
current_block->steps.k / planner.settings.axis_steps_per_mm[K_AXIS],
current_block->steps.u / planner.settings.axis_steps_per_mm[U_AXIS],
current_block->steps.v / planner.settings.axis_steps_per_mm[V_AXIS],
current_block->steps.w / planner.settings.axis_steps_per_mm[W_AXIS]
);
#if HAS_Y_AXIS
y_startPosn = y_endPosn_prevBlock;
float y_moveDist = current_block->steps.b / planner.settings.axis_steps_per_mm[Y_AXIS];
if (!direction.y) y_moveDist *= -1.0f;
y_Ratio = y_moveDist * oneOverLength;
#endif
LOGICAL_AXIS_CODE(
if (!direction.e) moveDist.e *= -1.0f,
if (!direction.x) moveDist.x *= -1.0f,
if (!direction.y) moveDist.y *= -1.0f,
if (!direction.z) moveDist.z *= -1.0f,
if (!direction.i) moveDist.i *= -1.0f,
if (!direction.j) moveDist.j *= -1.0f,
if (!direction.k) moveDist.k *= -1.0f,
if (!direction.u) moveDist.u *= -1.0f,
if (!direction.v) moveDist.v *= -1.0f,
if (!direction.w) moveDist.w *= -1.0f
);
#if HAS_Z_AXIS
z_startPosn = z_endPosn_prevBlock;
float z_moveDist = current_block->steps.c / planner.settings.axis_steps_per_mm[Z_AXIS];
if (!direction.z) z_moveDist *= -1.0f;
z_Ratio = z_moveDist * oneOverLength;
#endif
#if HAS_EXTRUDERS
e_startPosn = e_endPosn_prevBlock;
float extrusion = current_block->steps.e / planner.settings.axis_steps_per_mm[E_AXIS_N(current_block->extruder)];
if (!direction.e) extrusion *= -1.0f;
e_Ratio = extrusion * oneOverLength;
#endif
ratio = moveDist * oneOverLength;
const float spm = totalLength / current_block->step_event_count; // (steps/mm) Distance for each step
f_s = spm * current_block->initial_rate; // (steps/s) Start feedrate
@ -525,7 +463,7 @@ void FxdTiCtrl::loadBlockData(block_t * const current_block) {
odiff = oneby2a - oneby2d, // (i.e., oneby2a * 2) (mm/s) Change in speed for one second of acceleration
ldiff = totalLength - fdiff; // (mm) Distance to travel if nominal speed is reached
float T2 = (1.0f / F_n) * (ldiff - odiff * sq(F_n)); // (s) Coasting duration after nominal speed reached
if (T2 < 0.0f) {
if (T2 < 0.0f) {
T2 = 0.0f;
F_n = SQRT(ldiff / odiff); // Clip by intersection if nominal speed can't be reached.
}
@ -567,10 +505,7 @@ void FxdTiCtrl::loadBlockData(block_t * const current_block) {
// One less than (Accel + Coasting + Decel) datapoints
max_intervals = N1 + N2 + N3 - 1U;
TERN_(HAS_X_AXIS, x_endPosn_prevBlock += x_moveDist);
TERN_(HAS_Y_AXIS, y_endPosn_prevBlock += y_moveDist);
TERN_(HAS_Z_AXIS, z_endPosn_prevBlock += z_moveDist);
TERN_(HAS_EXTRUDERS, e_endPosn_prevBlock += extrusion);
endPosn_prevBlock += moveDist;
}
// Generate data points of the trajectory.
@ -596,24 +531,32 @@ void FxdTiCtrl::makeVector() {
accel_k = decel_P; // (mm/s^2) Acceleration K factor from Decel phase
}
TERN_(HAS_X_AXIS, xd[makeVector_batchIdx] = x_startPosn + x_Ratio * dist); // (mm) X position for this datapoint
TERN_(HAS_Y_AXIS, yd[makeVector_batchIdx] = y_startPosn + y_Ratio * dist); // (mm) Y
TERN_(HAS_Z_AXIS, zd[makeVector_batchIdx] = z_startPosn + z_Ratio * dist); // (mm) Z
NUM_AXIS_CODE(
traj.x[makeVector_batchIdx] = startPosn.x + ratio.x * dist,
traj.y[makeVector_batchIdx] = startPosn.y + ratio.y * dist,
traj.z[makeVector_batchIdx] = startPosn.z + ratio.z * dist,
traj.i[makeVector_batchIdx] = startPosn.i + ratio.i * dist,
traj.j[makeVector_batchIdx] = startPosn.j + ratio.j * dist,
traj.k[makeVector_batchIdx] = startPosn.k + ratio.k * dist,
traj.u[makeVector_batchIdx] = startPosn.u + ratio.u * dist,
traj.v[makeVector_batchIdx] = startPosn.v + ratio.v * dist,
traj.w[makeVector_batchIdx] = startPosn.w + ratio.w * dist
);
#if HAS_EXTRUDERS
const float new_raw_z1 = e_startPosn + e_Ratio * dist;
const float new_raw_z1 = startPosn.e + ratio.e * dist;
if (cfg.linearAdvEna) {
float dedt_adj = (new_raw_z1 - e_raw_z1) * (FTM_FS);
if (e_Ratio > 0.0f) dedt_adj += accel_k * cfg.linearAdvK;
if (ratio.e > 0.0f) dedt_adj += accel_k * cfg.linearAdvK;
e_advanced_z1 += dedt_adj * (FTM_TS);
ed[makeVector_batchIdx] = e_advanced_z1;
traj.e[makeVector_batchIdx] = e_advanced_z1;
e_raw_z1 = new_raw_z1;
}
else {
ed[makeVector_batchIdx] = new_raw_z1;
// Alternatively: ed[makeVector_batchIdx] = e_startPosn + (e_Ratio * dist) / (N1 + N2 + N3);
traj.e[makeVector_batchIdx] = new_raw_z1;
// Alternatively: ed[makeVector_batchIdx] = startPosn.e + (ratio.e * dist) / (N1 + N2 + N3);
}
#endif
@ -625,11 +568,11 @@ void FxdTiCtrl::makeVector() {
#if HAS_DYNAMIC_FREQ_MM
case dynFreqMode_Z_BASED:
if (zd[makeVector_batchIdx] != zd_z1) { // Only update if Z changed.
const float xf = cfg.baseFreq[X_AXIS] + cfg.dynFreqK[X_AXIS] * zd[makeVector_batchIdx],
yf = cfg.baseFreq[Y_AXIS] + cfg.dynFreqK[Y_AXIS] * zd[makeVector_batchIdx];
if (traj.z[makeVector_batchIdx] != zd_z1) { // Only update if Z changed.
const float xf = cfg.baseFreq[X_AXIS] + cfg.dynFreqK[X_AXIS] * traj.z[makeVector_batchIdx],
yf = cfg.baseFreq[Y_AXIS] + cfg.dynFreqK[Y_AXIS] * traj.z[makeVector_batchIdx];
updateShapingN(_MAX(xf, FTM_MIN_SHAPE_FREQ), _MAX(yf, FTM_MIN_SHAPE_FREQ));
zd_z1 = zd[makeVector_batchIdx];
zd_z1 = traj.z[makeVector_batchIdx];
}
break;
#endif
@ -638,8 +581,8 @@ void FxdTiCtrl::makeVector() {
case dynFreqMode_MASS_BASED:
// Update constantly. The optimization done for Z value makes
// less sense for E, as E is expected to constantly change.
updateShapingN( cfg.baseFreq[X_AXIS] + cfg.dynFreqK[X_AXIS] * ed[makeVector_batchIdx]
OPTARG(HAS_Y_AXIS, cfg.baseFreq[Y_AXIS] + cfg.dynFreqK[Y_AXIS] * ed[makeVector_batchIdx]) );
updateShapingN( cfg.baseFreq[X_AXIS] + cfg.dynFreqK[X_AXIS] * traj.e[makeVector_batchIdx]
OPTARG(HAS_Y_AXIS, cfg.baseFreq[Y_AXIS] + cfg.dynFreqK[Y_AXIS] * traj.e[makeVector_batchIdx]) );
break;
#endif
@ -648,22 +591,22 @@ void FxdTiCtrl::makeVector() {
// Apply shaping if in mode.
#if HAS_X_AXIS
if (WITHIN(cfg.mode, 10U, 19U)) {
xd_zi[xy_zi_idx] = xd[makeVector_batchIdx];
xd[makeVector_batchIdx] *= x_Ai[0];
if (cfg.modeHasShaper()) {
shaping.x.d_zi[shaping.zi_idx] = traj.x[makeVector_batchIdx];
traj.x[makeVector_batchIdx] *= shaping.x.Ai[0];
#if HAS_Y_AXIS
yd_zi[xy_zi_idx] = yd[makeVector_batchIdx];
yd[makeVector_batchIdx] *= y_Ai[0];
shaping.y.d_zi[shaping.zi_idx] = traj.y[makeVector_batchIdx];
traj.y[makeVector_batchIdx] *= shaping.y.Ai[0];
#endif
for (uint32_t i = 1U; i <= xy_max_i; i++) {
const uint32_t udiffx = xy_zi_idx - x_Ni[i];
xd[makeVector_batchIdx] += x_Ai[i] * xd_zi[x_Ni[i] > xy_zi_idx ? (FTM_ZMAX) + udiffx : udiffx];
for (uint32_t i = 1U; i <= shaping.max_i; i++) {
const uint32_t udiffx = shaping.zi_idx - shaping.x.Ni[i];
traj.x[makeVector_batchIdx] += shaping.x.Ai[i] * shaping.x.d_zi[shaping.x.Ni[i] > shaping.zi_idx ? (FTM_ZMAX) + udiffx : udiffx];
#if HAS_Y_AXIS
const uint32_t udiffy = xy_zi_idx - y_Ni[i];
yd[makeVector_batchIdx] += y_Ai[i] * yd_zi[y_Ni[i] > xy_zi_idx ? (FTM_ZMAX) + udiffy : udiffy];
const uint32_t udiffy = shaping.zi_idx - shaping.y.Ni[i];
traj.y[makeVector_batchIdx] += shaping.y.Ai[i] * shaping.y.d_zi[shaping.y.Ni[i] > shaping.zi_idx ? (FTM_ZMAX) + udiffy : udiffy];
#endif
}
if (++xy_zi_idx == (FTM_ZMAX)) xy_zi_idx = 0;
if (++shaping.zi_idx == (FTM_ZMAX)) shaping.zi_idx = 0;
}
#endif
@ -684,57 +627,63 @@ void FxdTiCtrl::makeVector() {
// Interpolates single data point to stepper commands.
void FxdTiCtrl::convertToSteps(const uint32_t idx) {
#if HAS_X_AXIS
int32_t x_err_P = 0;
#endif
#if HAS_Y_AXIS
int32_t y_err_P = 0;
#endif
#if HAS_Z_AXIS
int32_t z_err_P = 0;
#endif
#if HAS_EXTRUDERS
int32_t e_err_P = 0;
#endif
xyze_long_t err_P = { 0 };
//#define STEPS_ROUNDING
#if ENABLED(STEPS_ROUNDING)
#if HAS_X_AXIS
const float x_steps_tar = xm[idx] * planner.settings.axis_steps_per_mm[X_AXIS] + (xm[idx] < 0.0f ? -0.5f : 0.5f); // May be eliminated if guaranteed positive.
const int32_t x_delta = int32_t(x_steps_tar) - x_steps;
#endif
#if HAS_Y_AXIS
const float y_steps_tar = ym[idx] * planner.settings.axis_steps_per_mm[Y_AXIS] + (ym[idx] < 0.0f ? -0.5f : 0.5f);
const int32_t y_delta = int32_t(y_steps_tar) - y_steps;
#endif
#if HAS_Z_AXIS
const float z_steps_tar = zm[idx] * planner.settings.axis_steps_per_mm[Z_AXIS] + (zm[idx] < 0.0f ? -0.5f : 0.5f);
const int32_t z_delta = int32_t(z_steps_tar) - z_steps;
#endif
#if HAS_EXTRUDERS
const float e_steps_tar = em[idx] * planner.settings.axis_steps_per_mm[E_AXIS] + (em[idx] < 0.0f ? -0.5f : 0.5f);
const int32_t e_delta = int32_t(e_steps_tar) - e_steps;
#endif
const xyze_float_t steps_tar = LOGICAL_AXIS_ARRAY(
trajMod.e[idx] * planner.settings.axis_steps_per_mm[E_AXIS_N(current_block->extruder)] + (trajMod.e[idx] < 0.0f ? -0.5f : 0.5f), // May be eliminated if guaranteed positive.
trajMod.x[idx] * planner.settings.axis_steps_per_mm[X_AXIS] + (trajMod.x[idx] < 0.0f ? -0.5f : 0.5f),
trajMod.y[idx] * planner.settings.axis_steps_per_mm[Y_AXIS] + (trajMod.y[idx] < 0.0f ? -0.5f : 0.5f),
trajMod.z[idx] * planner.settings.axis_steps_per_mm[Z_AXIS] + (trajMod.z[idx] < 0.0f ? -0.5f : 0.5f),
trajMod.i[idx] * planner.settings.axis_steps_per_mm[I_AXIS] + (trajMod.i[idx] < 0.0f ? -0.5f : 0.5f),
trajMod.j[idx] * planner.settings.axis_steps_per_mm[J_AXIS] + (trajMod.j[idx] < 0.0f ? -0.5f : 0.5f),
trajMod.k[idx] * planner.settings.axis_steps_per_mm[K_AXIS] + (trajMod.k[idx] < 0.0f ? -0.5f : 0.5f),
trajMod.u[idx] * planner.settings.axis_steps_per_mm[U_AXIS] + (trajMod.u[idx] < 0.0f ? -0.5f : 0.5f),
trajMod.v[idx] * planner.settings.axis_steps_per_mm[V_AXIS] + (trajMod.v[idx] < 0.0f ? -0.5f : 0.5f),
trajMod.w[idx] * planner.settings.axis_steps_per_mm[W_AXIS] + (trajMod.w[idx] < 0.0f ? -0.5f : 0.5f),
);
xyze_long_t delta = xyze_long_t(steps_tar) - steps;
//const xyze_long_t delta = LOGICAL_AXIS_ARRAY(
// int32_t(steps_tar.e) - steps.e,
// int32_t(steps_tar.x) - steps.x,
// int32_t(steps_tar.y) - steps.y,
// int32_t(steps_tar.z) - steps.z,
// int32_t(steps_tar.i) - steps.i,
// int32_t(steps_tar.j) - steps.j,
// int32_t(steps_tar.k) - steps.k,
// int32_t(steps_tar.u) - steps.u,
// int32_t(steps_tar.v) - steps.v,
// int32_t(steps_tar.w) - steps.w
//);
#else
#if HAS_X_AXIS
const int32_t x_delta = int32_t(xm[idx] * planner.settings.axis_steps_per_mm[X_AXIS]) - x_steps;
#endif
#if HAS_Y_AXIS
const int32_t y_delta = int32_t(ym[idx] * planner.settings.axis_steps_per_mm[Y_AXIS]) - y_steps;
#endif
#if HAS_Z_AXIS
const int32_t z_delta = int32_t(zm[idx] * planner.settings.axis_steps_per_mm[Z_AXIS]) - z_steps;
#endif
#if HAS_EXTRUDERS
const int32_t e_delta = int32_t(em[idx] * planner.settings.axis_steps_per_mm[E_AXIS]) - e_steps;
#endif
xyze_long_t delta = LOGICAL_AXIS_ARRAY(
int32_t(trajMod.e[idx] * planner.settings.axis_steps_per_mm[E_AXIS_N(current_block->extruder)]) - steps.e,
int32_t(trajMod.x[idx] * planner.settings.axis_steps_per_mm[X_AXIS]) - steps.x,
int32_t(trajMod.y[idx] * planner.settings.axis_steps_per_mm[Y_AXIS]) - steps.y,
int32_t(trajMod.z[idx] * planner.settings.axis_steps_per_mm[Z_AXIS]) - steps.z,
int32_t(trajMod.i[idx] * planner.settings.axis_steps_per_mm[I_AXIS]) - steps.i,
int32_t(trajMod.j[idx] * planner.settings.axis_steps_per_mm[J_AXIS]) - steps.j,
int32_t(trajMod.k[idx] * planner.settings.axis_steps_per_mm[K_AXIS]) - steps.k,
int32_t(trajMod.u[idx] * planner.settings.axis_steps_per_mm[U_AXIS]) - steps.u,
int32_t(trajMod.v[idx] * planner.settings.axis_steps_per_mm[V_AXIS]) - steps.v,
int32_t(trajMod.w[idx] * planner.settings.axis_steps_per_mm[W_AXIS]) - steps.w
);
#endif
bool any_dirChange = (false
|| TERN0(HAS_X_AXIS, (x_delta > 0 && x_dirState != stepDirState_POS) || (x_delta < 0 && x_dirState != stepDirState_NEG))
|| TERN0(HAS_Y_AXIS, (y_delta > 0 && y_dirState != stepDirState_POS) || (y_delta < 0 && y_dirState != stepDirState_NEG))
|| TERN0(HAS_Z_AXIS, (z_delta > 0 && z_dirState != stepDirState_POS) || (z_delta < 0 && z_dirState != stepDirState_NEG))
|| TERN0(HAS_EXTRUDERS, (e_delta > 0 && e_dirState != stepDirState_POS) || (e_delta < 0 && e_dirState != stepDirState_NEG))
LOGICAL_AXIS_GANG(
|| (delta.e > 0 && dirState.e != stepDirState_POS) || (delta.e < 0 && dirState.e != stepDirState_NEG),
|| (delta.x > 0 && dirState.x != stepDirState_POS) || (delta.x < 0 && dirState.x != stepDirState_NEG),
|| (delta.y > 0 && dirState.y != stepDirState_POS) || (delta.y < 0 && dirState.y != stepDirState_NEG),
|| (delta.z > 0 && dirState.z != stepDirState_POS) || (delta.z < 0 && dirState.z != stepDirState_NEG),
|| (delta.i > 0 && dirState.i != stepDirState_POS) || (delta.i < 0 && dirState.i != stepDirState_NEG),
|| (delta.j > 0 && dirState.j != stepDirState_POS) || (delta.j < 0 && dirState.j != stepDirState_NEG),
|| (delta.k > 0 && dirState.k != stepDirState_POS) || (delta.k < 0 && dirState.k != stepDirState_NEG),
|| (delta.u > 0 && dirState.u != stepDirState_POS) || (delta.u < 0 && dirState.u != stepDirState_NEG),
|| (delta.v > 0 && dirState.v != stepDirState_POS) || (delta.v < 0 && dirState.v != stepDirState_NEG),
|| (delta.w > 0 && dirState.w != stepDirState_POS) || (delta.w < 0 && dirState.w != stepDirState_NEG)
)
);
for (uint32_t i = 0U; i < (FTM_STEPS_PER_UNIT_TIME); i++) {
@ -745,109 +694,48 @@ void FxdTiCtrl::convertToSteps(const uint32_t idx) {
bool anyStep = false;
// Commands are written in a bitmask with step and dir as single bits
auto COMMAND_SET = [&](auto &d, auto &e, auto &s, auto &b, auto bd, auto bs) {
if (d >= 0) {
if (e + d < (FTM_CTS_COMPARE_VAL)) {
e += d;
}
else {
s++;
b |= bd | bs;
e += d - (FTM_STEPS_PER_UNIT_TIME);
anyStep = true;
}
}
else {
if ((e + d) > -(FTM_CTS_COMPARE_VAL)) {
e += d;
}
else {
s--;
b |= bs;
e += d + (FTM_STEPS_PER_UNIT_TIME);
anyStep = true;
}
}
};
// Init all step/dir bits to 0 (defaulting to reverse/negative motion)
stepperCmdBuff[stepperCmdBuff_produceIdx] = 0;
// Commands are written in the format:
// |X_step|X_direction|Y_step|Y_direction|Z_step|Z_direction|E_step|E_direction|
#if HAS_X_AXIS
if (x_delta >= 0) {
if ((x_err_P + x_delta) < (FTM_CTS_COMPARE_VAL)) {
x_err_P += x_delta;
}
else {
x_steps++;
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_DIR_X) | _BV(FT_BIT_STEP_X);
x_err_P += x_delta - (FTM_STEPS_PER_UNIT_TIME);
anyStep = true;
}
}
else {
if ((x_err_P + x_delta) > -(FTM_CTS_COMPARE_VAL)) {
x_err_P += x_delta;
}
else {
x_steps--;
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_STEP_X);
x_err_P += x_delta + (FTM_STEPS_PER_UNIT_TIME);
anyStep = true;
}
}
#endif // HAS_X_AXIS
#if HAS_Y_AXIS
if (y_delta >= 0) {
if ((y_err_P + y_delta) < (FTM_CTS_COMPARE_VAL)) {
y_err_P += y_delta;
}
else {
y_steps++;
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_DIR_Y) | _BV(FT_BIT_STEP_Y);
y_err_P += y_delta - (FTM_STEPS_PER_UNIT_TIME);
anyStep = true;
}
}
else {
if ((y_err_P + y_delta) > -(FTM_CTS_COMPARE_VAL)) {
y_err_P += y_delta;
}
else {
y_steps--;
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_STEP_Y);
y_err_P += y_delta + (FTM_STEPS_PER_UNIT_TIME);
anyStep = true;
}
}
#endif // HAS_Y_AXIS
#if HAS_Z_AXIS
if (z_delta >= 0) {
if ((z_err_P + z_delta) < (FTM_CTS_COMPARE_VAL)) {
z_err_P += z_delta;
}
else {
z_steps++;
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_DIR_Z) | _BV(FT_BIT_STEP_Z);
z_err_P += z_delta - (FTM_STEPS_PER_UNIT_TIME);
anyStep = true;
}
}
else {
if ((z_err_P + z_delta) > -(FTM_CTS_COMPARE_VAL)) {
z_err_P += z_delta;
}
else {
z_steps--;
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_STEP_Z);
z_err_P += z_delta + (FTM_STEPS_PER_UNIT_TIME);
anyStep = true;
}
}
#endif // HAS_Z_AXIS
#if HAS_EXTRUDERS
if (e_delta >= 0) {
if ((e_err_P + e_delta) < (FTM_CTS_COMPARE_VAL)) {
e_err_P += e_delta;
}
else {
e_steps++;
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_DIR_E) | _BV(FT_BIT_STEP_E);
e_err_P += e_delta - (FTM_STEPS_PER_UNIT_TIME);
anyStep = true;
}
}
else {
if ((e_err_P + e_delta) > -(FTM_CTS_COMPARE_VAL)) {
e_err_P += e_delta;
}
else {
e_steps--;
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_STEP_E);
e_err_P += e_delta + (FTM_STEPS_PER_UNIT_TIME);
anyStep = true;
}
}
#endif // HAS_EXTRUDERS
// Set up step/dir bits for all axes
LOGICAL_AXIS_CODE(
COMMAND_SET(delta.e, err_P.e, steps.e, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_E), _BV(FT_BIT_STEP_E)),
COMMAND_SET(delta.x, err_P.x, steps.x, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_X), _BV(FT_BIT_STEP_X)),
COMMAND_SET(delta.y, err_P.y, steps.y, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_Y), _BV(FT_BIT_STEP_Y)),
COMMAND_SET(delta.z, err_P.z, steps.z, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_Z), _BV(FT_BIT_STEP_Z)),
COMMAND_SET(delta.i, err_P.i, steps.i, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_I), _BV(FT_BIT_STEP_I)),
COMMAND_SET(delta.j, err_P.j, steps.j, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_J), _BV(FT_BIT_STEP_J)),
COMMAND_SET(delta.k, err_P.k, steps.k, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_K), _BV(FT_BIT_STEP_K)),
COMMAND_SET(delta.u, err_P.u, steps.u, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_U), _BV(FT_BIT_STEP_U)),
COMMAND_SET(delta.v, err_P.v, steps.v, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_V), _BV(FT_BIT_STEP_V)),
COMMAND_SET(delta.w, err_P.w, steps.w, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_W), _BV(FT_BIT_STEP_W)),
);
if (!anyStep) {
nextStepTicks += (FTM_MIN_TICKS);
@ -858,46 +746,25 @@ void FxdTiCtrl::convertToSteps(const uint32_t idx) {
const uint8_t dir_index = stepperCmdBuff_produceIdx >> 3,
dir_bit = stepperCmdBuff_produceIdx & 0x7;
if (any_dirChange) {
SBI(stepperCmdBuff_ApplyDir[dir_index], dir_bit);
#if HAS_X_AXIS
if (x_delta > 0) {
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_DIR_X);
x_dirState = stepDirState_POS;
}
else {
x_dirState = stepDirState_NEG;
}
#endif
#if HAS_Y_AXIS
if (y_delta > 0) {
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_DIR_Y);
y_dirState = stepDirState_POS;
}
else {
y_dirState = stepDirState_NEG;
}
#endif
auto DIR_SET = [&](auto &d, auto &c, auto &b, auto bd) {
if (d > 0) { b |= bd; c = stepDirState_POS; } else { c = stepDirState_NEG; }
};
#if HAS_Z_AXIS
if (z_delta > 0) {
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_DIR_Z);
z_dirState = stepDirState_POS;
}
else {
z_dirState = stepDirState_NEG;
}
#endif
#if HAS_EXTRUDERS
if (e_delta > 0) {
stepperCmdBuff[stepperCmdBuff_produceIdx] |= _BV(FT_BIT_DIR_E);
e_dirState = stepDirState_POS;
}
else {
e_dirState = stepDirState_NEG;
}
#endif
LOGICAL_AXIS_CODE(
DIR_SET(delta.e, dirState.e, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_E)),
DIR_SET(delta.x, dirState.x, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_X)),
DIR_SET(delta.y, dirState.y, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_Y)),
DIR_SET(delta.z, dirState.z, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_Z)),
DIR_SET(delta.i, dirState.i, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_I)),
DIR_SET(delta.j, dirState.j, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_J)),
DIR_SET(delta.k, dirState.k, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_K)),
DIR_SET(delta.u, dirState.u, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_U)),
DIR_SET(delta.v, dirState.v, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_V)),
DIR_SET(delta.w, dirState.w, stepperCmdBuff[stepperCmdBuff_produceIdx], _BV(FT_BIT_DIR_W)),
);
any_dirChange = false;
}
@ -905,12 +772,10 @@ void FxdTiCtrl::convertToSteps(const uint32_t idx) {
CBI(stepperCmdBuff_ApplyDir[dir_index], dir_bit);
}
if (stepperCmdBuff_produceIdx == (FTM_STEPPERCMD_BUFF_SIZE) - 1) {
if (stepperCmdBuff_produceIdx == (FTM_STEPPERCMD_BUFF_SIZE) - 1)
stepperCmdBuff_produceIdx = 0;
}
else {
else
stepperCmdBuff_produceIdx++;
}
nextStepTicks = FTM_MIN_TICKS;
}

View file

@ -126,18 +126,8 @@ class FxdTiCtrl {
private:
#if HAS_X_AXIS
static float xd[2 * (FTM_BATCH_SIZE)], xm[FTM_BATCH_SIZE];
#endif
#if HAS_Y_AXIS
static float yd[2 * (FTM_BATCH_SIZE)], ym[FTM_BATCH_SIZE];
#endif
#if HAS_Z_AXIS
static float zd[2 * (FTM_BATCH_SIZE)], zm[FTM_BATCH_SIZE];
#endif
#if HAS_EXTRUDERS
static float ed[2 * (FTM_BATCH_SIZE)], em[FTM_BATCH_SIZE];
#endif
static xyze_trajectory_t traj;
static xyze_trajectoryMod_t trajMod;
static block_t *current_block_cpy;
static bool blockProcRdy, blockProcRdy_z1, blockProcDn;
@ -145,18 +135,9 @@ class FxdTiCtrl {
static bool runoutEna;
// Trapezoid data variables.
#if HAS_X_AXIS
static float x_startPosn, x_endPosn_prevBlock, x_Ratio;
#endif
#if HAS_Y_AXIS
static float y_startPosn, y_endPosn_prevBlock, y_Ratio;
#endif
#if HAS_Z_AXIS
static float z_startPosn, z_endPosn_prevBlock, z_Ratio;
#endif
#if HAS_EXTRUDERS
static float e_startPosn, e_endPosn_prevBlock, e_Ratio;
#endif
static xyze_pos_t startPosn, // (mm) Start position of block
endPosn_prevBlock; // (mm) End position of previous block
static xyze_float_t ratio; // (ratio) Axis move ratio of block
static float accel_P, decel_P,
F_P,
f_s,
@ -174,37 +155,38 @@ class FxdTiCtrl {
// Interpolation variables.
static uint32_t interpIdx,
interpIdx_z1;
#if HAS_X_AXIS
static int32_t x_steps;
static stepDirState_t x_dirState;
#endif
#if HAS_Y_AXIS
static int32_t y_steps;
static stepDirState_t y_dirState;
#endif
#if HAS_Z_AXIS
static int32_t z_steps;
static stepDirState_t z_dirState;
#endif
#if HAS_EXTRUDERS
static int32_t e_steps;
static stepDirState_t e_dirState;
#endif
static xyze_long_t steps;
static xyze_stepDir_t dirState;
static hal_timer_t nextStepTicks;
// Shaping variables.
#if HAS_X_AXIS
static uint32_t xy_zi_idx, xy_max_i;
static float xd_zi[FTM_ZMAX];
static float x_Ai[5];
static uint32_t x_Ni[5];
#endif
#if HAS_Y_AXIS
static float yd_zi[FTM_ZMAX];
static float y_Ai[5];
static uint32_t y_Ni[5];
#endif
typedef struct AxisShaping {
float d_zi[FTM_ZMAX] = { 0.0f }; // Data point delay vector.
float Ai[5]; // Shaping gain vector.
uint32_t Ni[5]; // Shaping time index vector.
void updateShapingN(const_float_t f, const_float_t df);
} axis_shaping_t;
typedef struct Shaping {
uint32_t zi_idx, // Index of storage in the data point delay vectors.
max_i; // Vector length for the selected shaper.
axis_shaping_t x;
#if HAS_Y_AXIS
axis_shaping_t y;
#endif
void updateShapingA(const_float_t zeta=FTM_SHAPING_ZETA, const_float_t vtol=FTM_SHAPING_V_TOL);
} shaping_t;
static shaping_t shaping; // Shaping data
#endif // HAS_X_AXIS
// Linear advance variables.
#if HAS_EXTRUDERS

View file

@ -42,17 +42,30 @@ enum dynFreqMode_t : uint8_t {
dynFreqMode_MASS_BASED = 2U
};
enum stepDirState_t {
enum stepDirState_t : uint8_t {
stepDirState_NOT_SET = 0U,
stepDirState_POS = 1U,
stepDirState_NEG = 2U
};
typedef struct XYZEarray<float, 2 * (FTM_BATCH_SIZE)> xyze_trajectory_t;
typedef struct XYZEarray<float, FTM_BATCH_SIZE> xyze_trajectoryMod_t;
typedef struct XYZEval<stepDirState_t> xyze_stepDir_t;
enum {
FT_BIT_DIR_E, FT_BIT_STEP_E,
FT_BIT_DIR_Z, FT_BIT_STEP_Z,
FT_BIT_DIR_Y, FT_BIT_STEP_Y,
FT_BIT_DIR_X, FT_BIT_STEP_X,
LIST_N(DOUBLE(LOGICAL_AXES),
FT_BIT_DIR_E, FT_BIT_STEP_E,
FT_BIT_DIR_X, FT_BIT_STEP_X,
FT_BIT_DIR_Y, FT_BIT_STEP_Y,
FT_BIT_DIR_Z, FT_BIT_STEP_Z,
FT_BIT_DIR_I, FT_BIT_STEP_I,
FT_BIT_DIR_J, FT_BIT_STEP_J,
FT_BIT_DIR_K, FT_BIT_STEP_K,
FT_BIT_DIR_U, FT_BIT_STEP_U,
FT_BIT_DIR_V, FT_BIT_STEP_V,
FT_BIT_DIR_W, FT_BIT_STEP_W
),
FT_BIT_COUNT
};

View file

@ -33,6 +33,10 @@
#include "../lcd/marlinui.h"
#include "../inc/MarlinConfig.h"
#if ENABLED(FT_MOTION)
#include "ft_motion.h"
#endif
#if IS_SCARA
#include "../libs/buzzer.h"
#include "../lcd/marlinui.h"
@ -2098,6 +2102,21 @@ void prepare_line_to_destination() {
void homeaxis(const AxisEnum axis) {
#if ENABLED(FT_MOTION)
// Disable ft-motion for homing
struct OnExit {
ftMotionMode_t oldmm;
OnExit() {
oldmm = fxdTiCtrl.cfg.mode;
fxdTiCtrl.cfg.mode = ftMotionMode_DISABLED;
}
~OnExit() {
fxdTiCtrl.cfg.mode = oldmm;
fxdTiCtrl.init();
}
} on_exit;
#endif
#if ANY(MORGAN_SCARA, MP_SCARA)
// Only Z homing (with probe) is permitted
if (axis != Z_AXIS) { BUZZ(100, 880); return; }

View file

@ -2608,23 +2608,23 @@ hal_timer_t Stepper::block_phase_isr() {
#define Z_MOVE_TEST !!current_block->steps.c
#endif
AxisBits axis_bits;
AxisBits didmove;
NUM_AXIS_CODE(
if (X_MOVE_TEST) axis_bits.a = true,
if (Y_MOVE_TEST) axis_bits.b = true,
if (Z_MOVE_TEST) axis_bits.c = true,
if (current_block->steps.i) axis_bits.i = true,
if (current_block->steps.j) axis_bits.j = true,
if (current_block->steps.k) axis_bits.k = true,
if (current_block->steps.u) axis_bits.u = true,
if (current_block->steps.v) axis_bits.v = true,
if (current_block->steps.w) axis_bits.w = true
if (X_MOVE_TEST) didmove.a = true,
if (Y_MOVE_TEST) didmove.b = true,
if (Z_MOVE_TEST) didmove.c = true,
if (current_block->steps.i) didmove.i = true,
if (current_block->steps.j) didmove.j = true,
if (current_block->steps.k) didmove.k = true,
if (current_block->steps.u) didmove.u = true,
if (current_block->steps.v) didmove.v = true,
if (current_block->steps.w) didmove.w = true
);
//if (current_block->steps.e) axis_bits.e = true;
//if (current_block->steps.a) axis_bits.x = true;
//if (current_block->steps.b) axis_bits.y = true;
//if (current_block->steps.c) axis_bits.z = true;
axis_did_move = axis_bits;
//if (current_block->steps.e) didmove.e = true;
//if (current_block->steps.a) didmove.x = true;
//if (current_block->steps.b) didmove.y = true;
//if (current_block->steps.c) didmove.z = true;
axis_did_move = didmove;
// No acceleration / deceleration time elapsed so far
acceleration_time = deceleration_time = 0;
@ -2758,7 +2758,7 @@ hal_timer_t Stepper::block_phase_isr() {
}
#endif
}
}
} // !current_block
// Return the interval to wait
return interval;
@ -3136,9 +3136,9 @@ void Stepper::init() {
* when shaping an axis.
*/
void Stepper::set_shaping_damping_ratio(const AxisEnum axis, const_float_t zeta) {
// from the damping ratio, get a factor that can be applied to advance_dividend for fixed point maths
// for ZV, we use amplitudes 1/(1+K) and K/(1+K) where K = exp(-zeta * M_PI / sqrt(1.0f - zeta * zeta))
// which can be converted to 1:7 fixed point with an excellent fit with a 3rd order polynomial
// From the damping ratio, get a factor that can be applied to advance_dividend for fixed-point maths.
// For ZV, we use amplitudes 1/(1+K) and K/(1+K) where K = exp(-zeta * π / sqrt(1.0f - zeta * zeta))
// which can be converted to 1:7 fixed point with an excellent fit with a 3rd-order polynomial.
float factor2;
if (zeta <= 0.0f) factor2 = 64.0f;
else if (zeta >= 1.0f) factor2 = 0.0f;
@ -3411,39 +3411,93 @@ void Stepper::report_positions() {
USING_TIMED_PULSE();
#if HAS_Z_AXIS
// Z is handled differently to update the stepper
// counts (needed by Marlin for bed level probing).
const bool z_fwd = TEST(command, FT_BIT_DIR_Z),
z_step = TEST(command, FT_BIT_STEP_Z);
#endif
const xyze_bool_t axis_step = LOGICAL_AXIS_ARRAY(
TEST(command, FT_BIT_STEP_E),
TEST(command, FT_BIT_STEP_X), TEST(command, FT_BIT_STEP_Y), TEST(command, FT_BIT_STEP_Z),
TEST(command, FT_BIT_STEP_I), TEST(command, FT_BIT_STEP_J), TEST(command, FT_BIT_STEP_K),
TEST(command, FT_BIT_STEP_U), TEST(command, FT_BIT_STEP_V), TEST(command, FT_BIT_STEP_W)
);
// Apply directions (which will apply to the entire linear move)
AxisBits axis_dir = last_direction_bits;
if (applyDir) {
TERN_(HAS_X_AXIS, X_APPLY_DIR(TEST(command, FT_BIT_DIR_X), false));
TERN_(HAS_Y_AXIS, Y_APPLY_DIR(TEST(command, FT_BIT_DIR_Y), false));
TERN_(HAS_Z_AXIS, Z_APPLY_DIR(z_fwd, false));
TERN_(HAS_EXTRUDERS, E_APPLY_DIR(TEST(command, FT_BIT_DIR_E), false));
axis_dir = LOGICAL_AXIS_ARRAY(
TEST(command, FT_BIT_DIR_E),
TEST(command, FT_BIT_DIR_X), TEST(command, FT_BIT_DIR_Y), TEST(command, FT_BIT_DIR_Z),
TEST(command, FT_BIT_DIR_I), TEST(command, FT_BIT_DIR_J), TEST(command, FT_BIT_DIR_K),
TEST(command, FT_BIT_DIR_U), TEST(command, FT_BIT_DIR_V), TEST(command, FT_BIT_DIR_W)
);
LOGICAL_AXIS_CODE(
E_APPLY_DIR(axis_dir.e, false),
X_APPLY_DIR(axis_dir.x, false), Y_APPLY_DIR(axis_dir.y, false), Z_APPLY_DIR(axis_dir.z, false),
I_APPLY_DIR(axis_dir.i, false), J_APPLY_DIR(axis_dir.j, false), K_APPLY_DIR(axis_dir.k, false),
U_APPLY_DIR(axis_dir.u, false), V_APPLY_DIR(axis_dir.v, false), W_APPLY_DIR(axis_dir.w, false)
);
last_direction_bits = axis_dir;
DIR_WAIT_AFTER();
}
TERN_(HAS_X_AXIS, X_APPLY_STEP(TEST(command, FT_BIT_STEP_X), false));
TERN_(HAS_Y_AXIS, Y_APPLY_STEP(TEST(command, FT_BIT_STEP_Y), false));
TERN_(HAS_Z_AXIS, Z_APPLY_STEP(z_step, false));
TERN_(HAS_EXTRUDERS, E_APPLY_STEP(TEST(command, FT_BIT_STEP_E), false));
// Start a step pulse
LOGICAL_AXIS_CODE(
if (axis_step.e) E_APPLY_STEP(STEP_STATE_E, false),
if (axis_step.x) X_APPLY_STEP(STEP_STATE_X, false), if (axis_step.y) Y_APPLY_STEP(STEP_STATE_Y, false),
if (axis_step.z) Z_APPLY_STEP(STEP_STATE_Z, false), if (axis_step.i) I_APPLY_STEP(STEP_STATE_I, false),
if (axis_step.j) J_APPLY_STEP(STEP_STATE_J, false), if (axis_step.k) K_APPLY_STEP(STEP_STATE_K, false),
if (axis_step.u) U_APPLY_STEP(STEP_STATE_U, false), if (axis_step.v) V_APPLY_STEP(STEP_STATE_V, false),
if (axis_step.w) W_APPLY_STEP(STEP_STATE_W, false)
);
// Begin waiting for the minimum pulse duration
START_TIMED_PULSE();
#if HAS_Z_AXIS
// Update step counts
if (z_step) count_position.z += z_fwd ? 1 : -1;
// Update axis direction adders
count_direction = LOGICAL_AXIS_ARRAY(
int8_t(axis_dir.e ? 1 : -1),
int8_t(axis_dir.x ? 1 : -1), int8_t(axis_dir.y ? 1 : -1), int8_t(axis_dir.z ? 1 : -1),
int8_t(axis_dir.i ? 1 : -1), int8_t(axis_dir.j ? 1 : -1), int8_t(axis_dir.k ? 1 : -1),
int8_t(axis_dir.u ? 1 : -1), int8_t(axis_dir.v ? 1 : -1), int8_t(axis_dir.w ? 1 : -1)
);
// Update stepper counts - required for various operations
LOGICAL_AXIS_CODE(
if (axis_step.e) count_position.e += count_direction.e,
if (axis_step.x) count_position.x += count_direction.x, if (axis_step.y) count_position.y += count_direction.y,
if (axis_step.z) count_position.z += count_direction.z, if (axis_step.i) count_position.i += count_direction.i,
if (axis_step.j) count_position.j += count_direction.j, if (axis_step.k) count_position.k += count_direction.k,
if (axis_step.u) count_position.u += count_direction.u, if (axis_step.v) count_position.v += count_direction.v,
if (axis_step.w) count_position.w += count_direction.w
);
#if HAS_EXTRUDERS
#if ENABLED(E_DUAL_STEPPER_DRIVERS)
constexpr bool e_axis_has_dedge = AXIS_HAS_DEDGE(E0) && AXIS_HAS_DEDGE(E1);
#else
#define _EDGE_BIT(N) | (AXIS_HAS_DEDGE(E##N) << TOOL_ESTEPPER(N))
constexpr Flags<E_STEPPERS> e_stepper_dedge { 0 REPEAT(EXTRUDERS, _EDGE_BIT) };
const bool e_axis_has_dedge = e_stepper_dedge[stepper_extruder];
#endif
#endif
AWAIT_HIGH_PULSE();
// Only wait for axes without edge stepping
const bool any_wait = false LOGICAL_AXIS_GANG(
|| (!e_axis_has_dedge && axis_step.e),
|| (!AXIS_HAS_DEDGE(X) && axis_step.x), || (!AXIS_HAS_DEDGE(Y) && axis_step.y), || (!AXIS_HAS_DEDGE(Z) && axis_step.z),
|| (!AXIS_HAS_DEDGE(I) && axis_step.i), || (!AXIS_HAS_DEDGE(J) && axis_step.j), || (!AXIS_HAS_DEDGE(K) && axis_step.k),
|| (!AXIS_HAS_DEDGE(U) && axis_step.u), || (!AXIS_HAS_DEDGE(V) && axis_step.v), || (!AXIS_HAS_DEDGE(W) && axis_step.w)
);
TERN_(HAS_X_AXIS, X_APPLY_STEP(0, false));
TERN_(HAS_Y_AXIS, Y_APPLY_STEP(0, false));
TERN_(HAS_Z_AXIS, Z_APPLY_STEP(0, false));
TERN_(HAS_EXTRUDERS, E_APPLY_STEP(0, false));
// Allow pulses to be registered by stepper drivers
if (any_wait) AWAIT_HIGH_PULSE();
// Stop pulses. Axes with DEDGE will do nothing, assuming STEP_STATE_* is HIGH
LOGICAL_AXIS_CODE(
if (axis_step.e) E_APPLY_STEP(!STEP_STATE_E, false),
if (axis_step.x) X_APPLY_STEP(!STEP_STATE_X, false), if (axis_step.y) Y_APPLY_STEP(!STEP_STATE_Y, false),
if (axis_step.z) Z_APPLY_STEP(!STEP_STATE_Z, false), if (axis_step.i) I_APPLY_STEP(!STEP_STATE_I, false),
if (axis_step.j) J_APPLY_STEP(!STEP_STATE_J, false), if (axis_step.k) K_APPLY_STEP(!STEP_STATE_K, false),
if (axis_step.u) U_APPLY_STEP(!STEP_STATE_U, false), if (axis_step.v) V_APPLY_STEP(!STEP_STATE_V, false),
if (axis_step.w) W_APPLY_STEP(!STEP_STATE_W, false)
);
} // Stepper::fxdTiCtrl_stepper
@ -3499,28 +3553,17 @@ void Stepper::report_positions() {
// or the set conditions should be changed from the block to
// the motion trajectory or motor commands.
AxisBits axis_bits;
AxisBits didmove;
static abce_ulong_t debounce{0};
auto debounce_axis = [&](const AxisEnum axis) {
if (current_block->steps[axis]) debounce[axis] = (AXIS_DID_MOVE_DEB) * 400; // divide by 0.0025f */
if (debounce[axis]) { didmove.bset(axis); debounce[axis]--; }
};
#define _DEBOUNCE(N) debounce_axis(AxisEnum(N));
static uint32_t a_debounce = 0U;
if (!!current_block->steps.a) a_debounce = (AXIS_DID_MOVE_DEB) * 400; // divide by 0.0025f
if (a_debounce) { axis_bits.a = true; a_debounce--; }
#if HAS_Y_AXIS
static uint32_t b_debounce = 0U;
if (!!current_block->steps.b) b_debounce = (AXIS_DID_MOVE_DEB) * 400;
if (b_debounce) { axis_bits.b = true; b_debounce--; }
#endif
#if HAS_Z_AXIS
static uint32_t c_debounce = 0U;
if (!!current_block->steps.c) c_debounce = (AXIS_DID_MOVE_DEB) * 400;
if (c_debounce) { axis_bits.c = true; c_debounce--; }
#endif
#if HAS_EXTRUDERS
static uint32_t e_debounce = 0U;
if (!!current_block->steps.e) e_debounce = (AXIS_DID_MOVE_DEB) * 400;
if (e_debounce) { axis_bits.e = true; e_debounce--; }
#endif
if (current_block) { REPEAT(LOGICAL_AXES, _DEBOUNCE); }
axis_did_move = axis_bits;
axis_did_move = didmove;
}
#endif // FT_MOTION

View file

@ -573,6 +573,8 @@ void reset_stepper_drivers(); // Called by settings.load / settings.reset
#define REV_E_DIR(E) do{ E0_DIR_WRITE((E) ? HIGH : LOW ); }while(0)
#endif
#define TOOL_ESTEPPER(T) ((T) >> 1)
#elif HAS_PRUSA_MMU2 // One multiplexed stepper driver
#define E_STEP_WRITE(E,V) E0_STEP_WRITE(V)
@ -726,6 +728,10 @@ void reset_stepper_drivers(); // Called by settings.load / settings.reset
#endif
#ifndef TOOL_ESTEPPER
#define TOOL_ESTEPPER(T) (T)
#endif
//
// Individual stepper enable / disable macros
//

View file

@ -1,6 +1,6 @@
#!/usr/bin/env bash
#
# Build tests for STM32F103RC BigTreeTech (SKR Mini E3)
# Build tests for STM32F103RC_btt (BigTreeTech SKR Mini E3)
#
# exit on first failure