12 #include <gsl/gsl_sf_zeta.h>
13 #include <boost/bind.hpp>
15 #include <TFitResult.h>
16 #include <gsl/gsl_sf_gegenbauer.h>
17 #include <gsl/gsl_sf_expint.h>
// ---------------------------------------------------------------------------
// NOTE(review): fragment of the MPlnu constructor.  The integer at the start
// of each code line is an embedded original line number (extraction
// artifact), and several original lines are elided.
// ---------------------------------------------------------------------------
// Workspace for the adaptive GSL cquad integrator used by integrateJ()
// (up to 100 stored intervals).
29 w_J = gsl_integration_cquad_workspace_alloc(100);
// The cached q2-integrals of the angular coefficients start out invalid
// for all three lepton species.
31 checkcache_int_tau = 0;
32 checkcache_int_mu = 0;
33 checkcache_int_el = 0;
// Seed every parameter cache with DBL_MAX so the first updateParameters()
// call can never see a spurious cache hit.
35 double max_double = std::numeric_limits<double>::max();
37 fplusz0_cache = max_double;
38 rho1to2_cache = max_double;
39 N_0_cache = max_double;
40 alpha_0_cache = max_double;
41 alpha_p_cache = max_double;
42 beta_0_cache = max_double;
43 beta_p_cache = max_double;
44 gamma_0_cache = max_double;
45 gamma_p_cache = max_double;
46 af0_1_cache = max_double;
47 af0_2_cache = max_double;
48 afplus_0_cache = max_double;
49 afplus_1_cache = max_double;
50 afplus_2_cache = max_double;
53 af0_3_cache = max_double;
54 afplus_3_cache = max_double;
// Wilson-coefficient caches (scalar, pseudoscalar, vector, axial, tensor,
// and their primed counterparts).
57 CS_cache = max_double;
58 CSp_cache = max_double;
59 CP_cache = max_double;
60 CPp_cache = max_double;
61 CV_cache = max_double;
62 CVp_cache = max_double;
63 CA_cache = max_double;
64 CAp_cache = max_double;
65 CT_cache = max_double;
66 CTp_cache = max_double;
// Registration of model-parameter names.  The container and the stream
// target are on elided lines -- presumably an insert into the list of
// parameters this observable depends on; TODO confirm against full file.
79 <<
"af0_1" <<
"af0_2" <<
"afplus_0" <<
"afplus_1" <<
"afplus_2"
81 <<
"af0_3" <<
"afplus_3"
// Presumably pole masses for the Blaschke factors (1^- poles used by f+,
// 0^+ poles used by f0) plus the susceptibilities and isospin factor nI.
83 <<
"mBc1m_1" <<
"mBc1m_2" <<
"mBc1m_3" <<
"mBc1m_4"
84 <<
"mBc0p_1" <<
"mBc0p_2" <<
"chitildeT" <<
"chiL" <<
"nI";
// Unsupported form-factor parametrization: report which one was requested.
86 std::stringstream out;
88 throw std::runtime_error(
"MPlnu: vector " + out.str() +
" not implemented");
// Second parameter set (CLN-style shape parameters) -- selected by an
// elided condition on the chosen parametrization.
94 <<
"fplusz0" <<
"rho1to2"
95 <<
"N_0" <<
"alpha_0" <<
"alpha_p" <<
"beta_0" <<
"beta_p" <<
"gamma_0" <<
"gamma_p";
103 void MPlnu::updateParameters()
// NOTE(review): extraction fragment -- the leading integers are embedded
// original line numbers; many lines are elided (including the opening
// brace and the model-parameter reads that fill the members used below).
// Common |amplitude|^2 normalization: GF^2 |Vcb|^2 / (64 pi^3 MM^3).
121 amplsq_factor =
GF*
GF*Vcb.abs2()/(64.*M_PI*M_PI*M_PI*
MM*
MM*
MM);
// Unsupported form-factor parametrization requested.
228 std::stringstream out;
230 throw std::runtime_error(
"MPlnu: vector " + out.str() +
" not implemented");
// Invalidate the cached q2-integrals if ANY shape parameter or Wilson
// coefficient changed since the last call.
245 if ((fplusz0 != fplusz0_cache) || (rho1to2 != rho1to2_cache)
246 || (N_0 != N_0_cache)
247 || (alpha_0 != alpha_0_cache) || (alpha_p != alpha_p_cache)
248 || (beta_0 != beta_0_cache) || (beta_p != beta_p_cache)
249 || (gamma_0 != gamma_0_cache) || (gamma_p != gamma_p_cache)
250 || (af0_1 != af0_1_cache) || (af0_2 != af0_2_cache)
251 || (afplus_0 != afplus_0_cache) || (afplus_1 != afplus_1_cache)
252 || (afplus_2 != afplus_2_cache)
254 || (af0_3 != af0_3_cache) || (afplus_3 != afplus_3_cache)
256 || (CS != CS_cache) || (CSp != CSp_cache)
257 || (CP != CP_cache) || (CPp != CPp_cache)
258 || (CV != CV_cache) || (CVp != CVp_cache)
259 || (CA != CA_cache) || (CAp != CAp_cache)
260 || (CT != CT_cache) || (CTp != CTp_cache)) {
261 checkcache_int_tau = 0;
262 checkcache_int_mu = 0;
263 checkcache_int_el = 0;
// Recompute and cache the J1/J3 integrals over the full [q2min, q2max]
// range for each lepton species; the J2 integral is cached as exactly 0.
// (The per-species guards appear to sit on elided lines.)
266 if ((checkcache_int_tau == 0) || (checkcache_int_mu == 0) || (checkcache_int_el == 0)) {
268 cached_intJ1_tau = integrateJ(1, q2min, q2max);
269 cached_intJ3_tau = integrateJ(3, q2min, q2max);
270 cached_intJ2_tau = 0.;
272 checkcache_int_tau = 1;
275 cached_intJ1_mu = integrateJ(1, q2min, q2max);
276 cached_intJ3_mu = integrateJ(3, q2min, q2max);
277 cached_intJ2_mu = 0.;
279 checkcache_int_mu = 1;
282 cached_intJ1_el = integrateJ(1, q2min, q2max);
283 cached_intJ3_el = integrateJ(3, q2min, q2max);
284 cached_intJ2_el = 0.;
286 checkcache_int_el = 1;
// Refresh the parameter caches.  NOTE(review): the assignments for
// N_0_cache, af0_1..af0_3 caches etc. fall on elided original lines
// (292, 299-301, 305-306) -- confirm they exist in the full file.
290 fplusz0_cache = fplusz0;
291 rho1to2_cache = rho1to2;
293 alpha_0_cache = alpha_0;
294 alpha_p_cache = alpha_p;
295 beta_0_cache = beta_0;
296 beta_p_cache = beta_p;
297 gamma_0_cache = gamma_0;
298 gamma_p_cache = gamma_p;
302 afplus_0_cache = afplus_0;
303 afplus_1_cache = afplus_1;
304 afplus_2_cache = afplus_2;
307 afplus_3_cache = afplus_3;
// Fix af0_0 from a constraint at z = z0: subtract the higher-order series
// terms divided by the outer function phi_f0 and the two-pole Blaschke
// product, then rescale by the same combination -- presumably enforcing
// f0(q2=0) = f+(q2=0); TODO confirm.  Cubic variant (with af0_3) at
// original line 315, quadratic variant at 317, selected by an elided
// condition on the expansion order.
315 af0_0 -= (af0_1 *
z0 + af0_2 *
z0 *
z0 + af0_3 *
z0 *
z0 *
z0) / phi_f0(
z0) / ((
z0 - z0p_1) / (1. -
z0 * z0p_1)*(
z0 - z0p_2) / (1. -
z0 * z0p_2));
317 af0_0 -= (af0_1 *
z0 + af0_2 *
z0 *
z0) / phi_f0(
z0) / ((
z0 - z0p_1) / (1. -
z0 * z0p_1)*(
z0 - z0p_2) / (1. -
z0 * z0p_2));
319 af0_0 *= phi_f0(
z0)*((
z0 - z0p_1) / (1. -
z0 * z0p_1)*(
z0 - z0p_2) / (1. -
z0 * z0p_2));
342 double MPlnu::lambda_half(
double a,
double b,
double c)
344 return sqrt(a*a+b*b+c*c-2.*(a*b+a*c+b*c));
350 double MPlnu::phi_fplus(
double z)
353 double prefac = 8. * (
MP /
MM)*(
MP /
MM) /
MM *
sqrt(8. * nI / (3. * M_PI * chitildeT));
354 double num = (1. + z)*(1. + z) *
sqrt(1. - z);
355 double den = (1. +
MP /
MM)*(1. - z) + 2. *
sqrt(
MP /
MM)*(1. + z);
356 double den5 = den * den * den * den*den;
357 return prefac * num / den5;
360 double MPlnu::phi_f0(
double z)
363 double prefac = (
MP /
MM)*(1. - (
MP /
MM)*(
MP /
MM)) *
sqrt(8. * nI / (M_PI * chiL));
364 double num = (1. - z * z) *
sqrt((1. - z));
365 double den = (1. +
MP /
MM)*(1. - z) + 2. *
sqrt(
MP /
MM)*(1. + z);
366 double den4 = den * den * den*den;
367 return prefac * num / den4;
// Vector form factor f+(q2).  NOTE(review): extraction fragment -- the
// if/else selecting between the CLN-like return (original line 375) and
// the BGL returns (379/381, cubic vs quadratic) sits on elided lines
// 374/376/378/380; leading integers are embedded original line numbers.
370 double MPlnu::fplus(
double q2)
// Recoil variable w(q2) = w0 - q2 / (2 MM MP).
372 double w =
w0 - q2 / (2. *
MM *
MP);
// Conformal variable z(w) = (sqrt(w+1) - sqrt(2)) / (sqrt(w+1) + sqrt(2)).
373 double z = (
sqrt(w + 1.) - M_SQRT2) / (
sqrt(w + 1.) + M_SQRT2);
// CLN-like expansion: normalization fplusz0 * N_0 times a cubic in z with
// slope parameter rho1to2 and the alpha_p/beta_p/gamma_p modifiers.
375 return fplusz0 * N_0 * (1. - alpha_p*8. * rho1to2 * z + beta_p*(51. * rho1to2 - 10.) * z * z - gamma_p*(252. * rho1to2 - 84.) * z * z * z);
// BGL: Blaschke product over the three 1^- pole positions z1m_1..z1m_3.
377 double P_fplus = (z - z1m_1) / (1. - z * z1m_1)*(z - z1m_2) / (1. - z * z1m_2)*(z - z1m_3) / (1. - z * z1m_3);
// Cubic (afplus_3) vs quadratic z-series, divided by the outer function
// and the Blaschke factor.
379 return (afplus_0 + afplus_1 * z + afplus_2 * z * z + afplus_3 * z * z * z) / phi_fplus(z) / P_fplus;
381 return (afplus_0 + afplus_1 * z + afplus_2 * z * z) / phi_fplus(z) / P_fplus;
// Scalar form factor f0(q2).  NOTE(review): extraction fragment -- the
// branches selecting between the CLN-like return (original line 395) and
// the BGL returns (399/401) sit on elided lines 394/396/398/400.
386 double MPlnu::f0(
double q2)
// Recoil variable w(q2) = w0 - q2 / (2 MM MP).
388 double w =
w0 - q2 / (2. *
MM *
MP);
// Conformal variable z(w) = (sqrt(w+1) - sqrt(2)) / (sqrt(w+1) + sqrt(2)).
389 double z = (
sqrt(w + 1.) - M_SQRT2) / (
sqrt(w + 1.) + M_SQRT2);
// prefac0 = 2 r / (1 + r)^2 * (1 + w0), with r = MP/MM.
391 double prefac0 = 2. * (
MP /
MM) / (1. +
MP /
MM)/ (1. +
MP /
MM)*(1. +
w0);
// norm: same polynomial evaluated at w = w0, used to normalize the ratio
// below (so the expression equals fplus(q2) times a ratio that is 1 at w0).
392 double norm = prefac0 * (1. - alpha_0*0.0068 * (
w0 - 1.) + beta_0*0.0017 * (
w0 - 1.)*(
w0 - 1.) - gamma_0*0.0013 * (
w0 - 1.)*(
w0 - 1.)*(
w0 - 1.));
393 double prefac = fplus(q2)* prefac0/(1. +
w0)*(1. + w);
// CLN-like return: cubic polynomial in (w - 1) with fixed numerical
// coefficients 0.0068 / 0.0017 / 0.0013 scaled by alpha_0/beta_0/gamma_0.
395 return prefac/norm * (1. - alpha_0*0.0068 * (w - 1.) + beta_0*0.0017 * (w - 1.)*(w - 1.) - gamma_0*0.0013 * (w - 1.)*(w - 1.)*(w - 1.));
// BGL: Blaschke product over the two 0^+ pole positions z0p_1, z0p_2.
397 double P_f0 = (z - z0p_1) / (1. - z * z0p_1)*(z - z0p_2) / (1. - z * z0p_2);
// Cubic (af0_3) vs quadratic z-series, divided by outer function and
// Blaschke factor.
399 return (
af0_0 + af0_1 * z + af0_2 * z * z + af0_3 * z * z * z) / phi_f0(z) / P_f0;
401 return (
af0_0 + af0_1 * z + af0_2 * z * z) / phi_f0(z) / P_f0;
// Tensor form factor fT(q2) -- body elided (original lines 407-415).
406 double MPlnu::fT(
double q2)
// NOTE(review): the four returns below (original lines 416/421/426/431)
// belong to four functions whose headers are elided.  From the Wilson
// coefficients involved they look like helicity amplitudes:
// (CV+CVp, C7+C7p, fT) -> vector HV; (CA+CAp, f+) -> axial HA;
// (CP+CPp, f0) -> pseudoscalar HP; (CS+CSp, f0) -> scalar HS.
// TODO confirm against the full file.
416 return lambda_half(
MM*
MM,
MP*
MP, q2) / 2. /
sqrt(q2)*((CV + CVp) * fplus(q2) + 2. *
Mb / (
MM +
MP)*(C7 + C7p) * fT(q2));
421 return lambda_half(
MM*
MM,
MP*
MP, q2) / 2. /
sqrt(q2)*(CA + CAp) * fplus(q2);
426 return (
MM *
MM -
MP *
MP) / 2. * ((CP + CPp) / (
Mb -
Mc)+(
Mlep +
Mnu) / q2 * (CA + CAp)) * f0(q2);
431 return (
MM *
MM -
MP *
MP) / 2. * ((CS + CSp) / (
Mb -
Mc)+(
Mlep -
Mnu) / q2 * (CV + CVp)) * f0(q2);
// NOTE(review): fragments of three functions whose headers are elided --
// from their use in J1/J2/J3 below these appear to be the bodies of
// G0(q2), G1(q2) and G2(q2); TODO confirm.  lambda_lep is read without a
// visible definition (presumably lambda_half over the lepton masses and
// q2, computed on an elided line).
449 double lambda_MM = lambda_half(
MM*
MM,
MP*
MP, q2);
451 double lambda_lep2 = lambda_lep*lambda_lep;
// Lepton and neutrino energies in the dilepton rest frame.
452 double Elep =
sqrt(
Mlep *
Mlep + lambda_lep2 / (4. * q2));
453 double Enu =
sqrt(
Mnu *
Mnu + lambda_lep2 / (4. * q2));
454 double Gprefactor = lambda_MM * lambda_lep / q2;
// Sum over |HV|^2, |HA|^2, |HS|^2, |HP|^2, |HT|^2, |HTt|^2 plus the
// HA-HT and HV-HTt interference terms.
456 return Gprefactor * ((4. * (Elep * Enu +
Mlep *
Mnu) + lambda_lep2 / (3. * q2)) *
HV(q2).abs2()+
457 (4. * (Elep * Enu -
Mlep *
Mnu) + lambda_lep2 / (3. * q2)) * HA(q2).abs2()+
458 (4. * (Elep * Enu -
Mlep *
Mnu) + lambda_lep2 / q2) * HS(q2).abs2()+
459 (4. * (Elep * Enu +
Mlep *
Mnu) + lambda_lep2 / q2) * HP(q2).abs2()+
460 (8. * (Elep * Enu -
Mlep *
Mnu) - lambda_lep2 / (12. * q2)) * HT(q2).abs2()+
461 (16. * (Elep * Enu +
Mlep *
Mnu) - lambda_lep2 / (12. * q2)) * HTt(q2).abs2() +
462 8. * M_SQRT2 *(Enu *
Mlep - Elep *
Mnu)*(HA(q2) * HT(q2).conjugate()).imag() +
463 16. * (Enu *
Mlep + Elep *
Mnu)*(
HV(q2) * HTt(q2).conjugate()).imag());
// Second fragment (presumably G1): HA-HP and HV-HS interferences.
// NOTE(review): this return is cut mid-expression -- its continuation
// (original lines 474-476) is elided.
468 double lambda_MM = lambda_half(
MM*
MM,
MP*
MP, q2);
470 double Gprefactor = lambda_MM * lambda_lep / q2;
472 return -Gprefactor * 4. * lambda_lep * ((HA(q2)*(
Mlep -
Mnu) * HP(q2).conjugate()
473 +
HV(q2)*(
Mlep +
Mnu) * HS(q2).conjugate()).real() /
sqrt(q2)
// Third fragment (presumably G2): |HA|^2 + |HV|^2 minus tensor terms.
479 double lambda_MM = lambda_half(
MM*
MM,
MP*
MP, q2);
481 double lambda_lep2 = lambda_lep*lambda_lep;
482 double Gprefactor = lambda_MM * lambda_lep / q2;
484 return -Gprefactor * (4. * lambda_lep2 / (3. * q2))*(HA(q2).abs2() +
HV(q2).abs2() - 2. * HT(q2).abs2() - 4. * HTt(q2).abs2());
490 double MPlnu::J1(
double q2)
493 return amplsq_factor * (G0(q2) - G2(q2) / 2).real();
496 double MPlnu::J2(
double q2)
499 return amplsq_factor * G1(q2).real();
502 double MPlnu::J3(
double q2)
505 return amplsq_factor * 3. / 2. * G2(q2).real();
// Differential rate dGamma/dq2.  NOTE(review): extraction fragment -- the
// definitions of coeff_fp/coeff_f0 (original lines 513-514) and the final
// return (beyond line 515) are elided.
508 double MPlnu::dGammadq2(
double q2)
// Outside the kinematically allowed range [q2min, (MM - MP)^2] the rate
// vanishes.  (Note the alternative token `or` for ||.)
510 if ((q2 < q2min) or (q2 > (
MM -
MP)*(
MM -
MP)))
return 0.;
511 double sqlambdaB = lambda_half(q2,
MM*
MM,
MP*
MP);
// Prefactor (CV - CA)^2 GF^2 |Vcb|^2 MM sqrt(lambda) / (192 pi^3).
512 double prefac = (CV-CA)*(CV-CA)*
GF*
GF*Vcb.abs2()*
MM*sqlambdaB/192./M_PI/M_PI/M_PI;
// Squared amplitude built from f+ and f0 with coefficients defined on
// elided lines -- presumably the standard kinematic weights; TODO confirm.
515 double TotAmp2 = coeff_fp*fplus(q2)*fplus(q2)+coeff_f0*f0(q2)*f0(q2);
// Integrate the angular coefficient J_i over [q2_min, q2_max] with the GSL
// cquad routine.  NOTE(review): extraction fragment -- the dispatch on the
// index i, the gsl_function FJ setup and the per-branch returns of J_res
// live on elided lines; leading integers are embedded original line numbers.
523 double MPlnu::integrateJ(
int i,
double q2_min,
double q2_max)
// Disable GSL's abort-on-error handler for the duration of the call;
// it is restored after each integration below.
525 old_handler = gsl_set_error_handler_off();
// i == 1 branch: serve the cached full-range J1 integral per lepton species.
529 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ1_tau;
530 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ1_mu;
531 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ1_el;
// FIXME(review): on integration failure the NaN below is computed and then
// DISCARDED -- there is no return or assignment, so the caller silently
// gets whatever J_res holds.  Most likely this was meant to be
// "return std::numeric_limits<double>::quiet_NaN();".  The same defect
// repeats on the three copies of this line further down (542, 551, 556).
533 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
534 gsl_set_error_handler(old_handler);
// i == 2 branch: cached J2 integrals (stored as 0. elsewhere in this file).
538 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ2_tau;
539 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ2_mu;
540 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ2_el;
542 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
543 gsl_set_error_handler(old_handler);
// i == 3 branch: cached J3 integrals.
547 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ3_tau;
548 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ3_mu;
549 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ3_el;
551 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
552 gsl_set_error_handler(old_handler);
// Presumably a further index handled on elided lines, then one more
// uncached integration.
556 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
557 gsl_set_error_handler(old_handler);
// Unknown index: restore the handler and report which index was requested.
561 gsl_set_error_handler(old_handler);
562 std::stringstream out;
564 throw std::runtime_error(
"MPlnu::integrateJ: index " + out.str() +
" not implemented");
// Differential rate in the recoil variable: dGamma/dw = 2 (J1 + J3/3).
// NOTE(review): original lines 570-572 (opening brace and possibly more)
// are elided; any Jacobian dq2/dw is presumably absorbed in the J's or
// applied elsewhere -- TODO confirm.
569 double MPlnu::dGammadw(
double q2)
573 return 2. * (J1(q2) + J3(q2) / 3.);
// NOTE(review): body fragment of a w-binned observable whose header
// (original lines 575-580) is elided.  Converts the w-bin [w_min, w_max]
// to q2 limits via q2 = 2 MM MP (w0 - w) -- larger w maps to smaller q2,
// hence w_max fixes q2_min -- and integrates 2 (J1 + J3/3) over the bin.
581 double q2_min = (2. *
MM *
MP)*(
w0 - w_max);
582 double q2_max = (2. *
MM *
MP)*(
w0 - w_min);
584 double intJ1 = integrateJ(1, q2_min, q2_max);
585 double intJ3 = integrateJ(3, q2_min, q2_max);
587 return 2. * (intJ1 + intJ3 / 3.);
// NOTE(review): return statements from several unitarity-constraint
// functions whose headers are elided.  Cubic vs quadratic variants are
// presumably selected by an elided condition on the expansion order.
// Sum of squared f+ z-expansion coefficients (with / without afplus_3).
598 return afplus_0 * afplus_0 + afplus_1 * afplus_1 + afplus_2 * afplus_2 + afplus_3 * afplus_3;
600 return afplus_0 * afplus_0 + afplus_1 * afplus_1 + afplus_2 * afplus_2;
// Sum of squared f0 z-expansion coefficients (with / without af0_3).
609 return af0_0 *
af0_0 + af0_1 * af0_1 + af0_2 * af0_2 + af0_3 * af0_3;
611 return af0_0 *
af0_0 + af0_1 * af0_1 + af0_2*af0_2;
// Weighted quadratic forms in the afplus coefficients with hard-coded
// numerical weights -- presumably a different (e.g. derivative/tensor)
// unitarity bound; TODO confirm the weights' provenance.
// NOTE(review): the two variants quote slightly different precision for
// what look like the same weights (101.62 vs 101.619, 16.475 vs 16.4754)
// -- worth confirming against the source of these numbers.
620 return 1707.54 * afplus_0 * afplus_0 + 1299.57 * afplus_0 * afplus_1 + 442.82 * afplus_1 * afplus_1 - 356.01 * afplus_0 * afplus_2
621 - 101.62 * afplus_1 * afplus_2 + 34.947 * afplus_2 * afplus_2 - 206.767 * afplus_0 * afplus_3 - 127.668 * afplus_1 * afplus_3
622 + 33.234 * afplus_2 * afplus_3 + 16.475 * afplus_3 * afplus_3;
624 return 442.82 * afplus_0 * afplus_0 - 101.619 * afplus_0 * afplus_1 + 34.947* afplus_1 * afplus_1 - 127.668 * afplus_0 * afplus_2 + 33.234 * afplus_1 * afplus_2 + 16.4754 * afplus_2 * afplus_2;