12 #include <boost/bind.hpp>
14 #include <gsl/gsl_sf_gegenbauer.h>
15 #include <gsl/gsl_sf_expint.h>
27 w_J = gsl_integration_cquad_workspace_alloc (100);
29 checkcache_int_tau = 0;
30 checkcache_int_mu = 0;
31 checkcache_int_el = 0;
33 double max_double = std::numeric_limits<double>::max();
35 hA1w1_cache = max_double;
36 rho2_cache = max_double;
37 R1w1_cache = max_double;
38 R2w1_cache = max_double;
39 N_A_cache = max_double;
40 N_1_cache =max_double;
41 N_2_cache = max_double;
42 j_A_cache= max_double;
43 j_0_cache = max_double;
44 j_1_cache = max_double;
45 j_2_cache = max_double;
46 k_A_cache = max_double;
47 k_0_cache = max_double;
48 k_1_cache = max_double;
49 k_2_cache = max_double;
50 l_A_cache = max_double;
52 af0_cache = max_double;
53 af1_cache = max_double;
54 af2_cache = max_double;
55 ag0_cache = max_double;
56 ag1_cache = max_double;
57 ag2_cache = max_double;
58 aF11_cache = max_double;
59 aF12_cache = max_double;
60 aF21_cache = max_double;
61 aF22_cache = max_double;
63 CS_cache = max_double;
64 CSp_cache = max_double;
65 CP_cache = max_double;
66 CPp_cache = max_double;
67 CV_cache = max_double;
68 CVp_cache = max_double;
69 CA_cache = max_double;
70 CAp_cache = max_double;
71 CT_cache = max_double;
72 CTp_cache = max_double;
85 <<
"af0" <<
"af1" <<
"af2" <<
"ag0" <<
"ag1" <<
"ag2"
86 <<
"aF11" <<
"aF12" <<
"aF21" <<
"aF22"
87 <<
"mBcstV1" <<
"mBcstV2" <<
"mBcstV3" <<
"mBcstV4"
88 <<
"mBcstA1" <<
"mBcstA2" <<
"mBcstA3" <<
"mBcstA4"
89 <<
"mBcstP1" <<
"mBcstP2" <<
"mBcstP3"
90 <<
"chiTV" <<
"chiTA" <<
"chiTP" <<
"nI";
92 std::stringstream out;
94 throw std::runtime_error(
"MVlnu: vector " + out.str() +
" not implemented");
100 <<
"hA1w1" <<
"rho2" <<
"R1w1" <<
"R2w1"
101 <<
"N_A" <<
"N_1" <<
"N_2" <<
"j_A" <<
"j_0" <<
"j_1" <<
"j_2"
102 <<
"k_A" <<
"k_0" <<
"k_1" <<
"k_2" <<
"l_A";
110 void MVlnu::updateParameters()
128 amplsq_factor =
GF*
GF*Vcb.abs2()/(64.*M_PI*M_PI*M_PI*
MM*
MM*
MM);
266 std::stringstream out;
268 throw std::runtime_error(
"MVlnu: vector " + out.str() +
" not implemented");
296 if ((hA1w1 != hA1w1_cache) || (rho2 != rho2_cache) || (R1w1 != R1w1_cache) || (R2w1 != R2w1_cache)
297 || (N_A != N_A_cache) || (N_1 != N_1_cache) || (N_2 != N_2_cache)
298 || (j_A != j_A_cache) || (j_0 != j_0_cache) || (j_1 != j_1_cache) || (j_2 != j_2_cache)
299 || (k_A != k_A_cache) || (k_0 != k_0_cache) || (k_1 != k_1_cache) || (k_2 != k_2_cache)
300 || (l_A != l_A_cache)
301 || (af0 != af0_cache) || (af1 != af1_cache) || (af2 != af2_cache)
302 || (ag0 != ag0_cache) || (ag1 != af1_cache) || (ag2 != af2_cache)
303 || (aF11 != aF11_cache) || (aF12 != aF12_cache)
304 || (aF21 != aF21_cache) || (aF22 != aF22_cache)
305 || (CS != CS_cache) || (CSp != CSp_cache)
306 || (CP != CP_cache) || (CPp != CPp_cache)
307 || (CV != CV_cache) || (CVp != CVp_cache)
308 || (CA != CA_cache) || (CAp != CAp_cache)
309 || (CT != CT_cache) || (CTp != CTp_cache)) {
310 checkcache_int_tau = 0;
311 checkcache_int_mu = 0;
312 checkcache_int_el = 0;
315 if ((checkcache_int_tau == 0) || (checkcache_int_mu == 0) || (checkcache_int_el == 0)) {
317 cached_intJ1s_tau = integrateJ(1, q2min, q2max);
318 cached_intJ1c_tau = integrateJ(2, q2min, q2max);
319 cached_intJ2s_tau = integrateJ(3, q2min, q2max);
320 cached_intJ2c_tau = integrateJ(4, q2min, q2max);
321 cached_intJ3_tau = integrateJ(5, q2min, q2max);
322 cached_intJ6s_tau = integrateJ(8, q2min, q2max);
323 cached_intJ6c_tau = integrateJ(9, q2min, q2max);
324 cached_intJ9_tau = integrateJ(12, q2min, q2max);
325 cached_intJ4_tau = 0.;
326 cached_intJ5_tau = 0.;
327 cached_intJ7_tau = 0.;
328 cached_intJ8_tau = 0.;
336 checkcache_int_tau = 1;
339 cached_intJ1s_mu = integrateJ(1, q2min, q2max);
340 cached_intJ1c_mu = integrateJ(2, q2min, q2max);
341 cached_intJ2s_mu = integrateJ(3, q2min, q2max);
342 cached_intJ2c_mu = integrateJ(4, q2min, q2max);
343 cached_intJ3_mu = integrateJ(5, q2min, q2max);
344 cached_intJ6s_mu = integrateJ(8, q2min, q2max);
345 cached_intJ6c_mu = integrateJ(9, q2min, q2max);
346 cached_intJ9_mu = integrateJ(12, q2min, q2max);
347 cached_intJ4_mu = 0.;
348 cached_intJ5_mu = 0.;
349 cached_intJ7_mu = 0.;
350 cached_intJ8_mu = 0.;
358 checkcache_int_mu = 1;
361 cached_intJ1s_el = integrateJ(1, q2min, q2max);
362 cached_intJ1c_el = integrateJ(2, q2min, q2max);
363 cached_intJ2s_el = integrateJ(3, q2min, q2max);
364 cached_intJ2c_el = integrateJ(4, q2min, q2max);
365 cached_intJ3_el = integrateJ(5, q2min, q2max);
366 cached_intJ6s_el = integrateJ(8, q2min, q2max);
367 cached_intJ6c_el = integrateJ(9, q2min, q2max);
368 cached_intJ9_el = integrateJ(12, q2min, q2max);
369 cached_intJ4_el = 0.;
370 cached_intJ5_el = 0.;
371 cached_intJ7_el = 0.;
372 cached_intJ8_el = 0.;
380 checkcache_int_el = 1;
433 double MVlnu::lambda_half(
double a,
double b,
double c)
435 return sqrt(a*a+b*b+c*c-2.*(a*b+a*c+b*c));
// Outer function phi_f(z) of the BGL expansion for the form factor f.
// NOTE(review): the definition of `den` (original line 445) is missing from
// this copy of the file — recover it from version control before building.
441 double MVlnu::phi_f(
double z)
443 double prefac = 4. * (
MV_o_MM) /
MM /
MM *
sqrt(nI / (3. * M_PI * chiTA));
444 double num = (1. + z) *
sqrt((1. - z)*(1. - z)*(1. - z));
446 double den4 = den * den * den*den;
447 return prefac * num / den4;
450 double MVlnu::f_BGL(
double q2)
452 double w =
w0 - q2 / (2. *
MM *
MV);
453 double z = (
sqrt(w + 1.) - M_SQRT2) / (
sqrt(w + 1.) + M_SQRT2);
454 double Pfacf = (z - zA1) / (1. - z * zA1)*(z - zA2) / (1. - z * zA2)*(z - zA3) / (1. - z * zA3)*(z - zA4) / (1. - z * zA4);
455 double phif = phi_f(z);
456 return (af0 + af1 * z + af2 * z * z) / phif / Pfacf;
// Outer function phi_g(z) of the BGL expansion for the form factor g.
// NOTE(review): the definitions of `num` and `den` (original lines 462-463)
// are missing from this copy of the file — recover before building.
459 double MVlnu::phi_g(
double z)
461 double prefac =
sqrt(nI / (3. * M_PI * chiTV));
464 double den4 = den * den * den*den;
465 return prefac * num / den4;
468 double MVlnu::g_BGL(
double q2)
470 double w =
w0 - q2 / (2. *
MM *
MV);
471 double z = (
sqrt(w + 1.) - M_SQRT2) / (
sqrt(w + 1.) + M_SQRT2);
472 double Pfacg = (z - zV1) / (1. - z * zV1)*(z - zV2) / (1. - z * zV2)*(z - zV3) / (1. - z * zV3)*(z - zV4) / (1. - z * zV4);
473 double phig = phi_g(z);
474 return (ag0 + ag1 * z + ag2 * z * z) / phig / Pfacg;
// Outer function phi_F1(z) of the BGL expansion for the form factor F1.
// NOTE(review): the definitions of `prefac` and `den` (original lines
// 478-481) are missing from this copy of the file — recover before building.
477 double MVlnu::phi_F1(
double z)
480 double num = (1. + z) *
sqrt((1. - z)*(1. - z)*(1. - z)*(1. - z)*(1. - z));
482 double den5 = den * den * den * den*den;
483 return prefac * num / den5;
486 double MVlnu::F1_BGL(
double q2)
488 double w =
w0 - q2 / (2. *
MM *
MV);
489 double z = (
sqrt(w + 1.) - M_SQRT2) / (
sqrt(w + 1.) + M_SQRT2);
490 double PfacF1 = (z - zA1) / (1. - z * zA1)*(z - zA2) / (1. - z * zA2)*(z - zA3) / (1. - z * zA3)*(z - zA4) / (1. - z * zA4);
491 double phiF1 = phi_F1(z);
492 double aF10 = (
MM -
MV)*(phi_F1(0.) / phi_f(0.)) * af0;
493 return (aF10 + aF11 * z + aF12 * z * z) / phiF1 / PfacF1;
// Outer function phi_F2(z) of the BGL expansion for the form factor F2.
// NOTE(review): the definitions of `prefac` and `den` (original lines
// 497-500) are missing from this copy of the file — recover before building.
496 double MVlnu::phi_F2(
double z)
499 double num = (1. + z)*(1. + z) /
sqrt(1. - z);
501 double den4 = den * den * den*den;
502 return prefac * num / den4;
505 double MVlnu::F2_BGL(
double q2)
507 double w =
w0 - q2 / (2. *
MM *
MV);
508 double z = (
sqrt(w + 1.) - M_SQRT2) / (
sqrt(w + 1.) + M_SQRT2);
509 double z0 = (
sqrt(
w0 + 1.) - M_SQRT2) / (
sqrt(
w0 + 1.) + M_SQRT2);
510 double PfacF2 = (z - zP1) / (1. - z * zP1)*(z - zP2) / (1. - z * zP2)*(z - zP3) / (1. - z * zP3);
511 double PfacF2z0 = (z0 - zP1) / (1. - z0 * zP1)*(z0 - zP2) / (1. - z0 * zP2)*(z0 - zP3) / (1. - z0 * zP3);
512 double phiF2 = phi_F2(z);
513 double phiF2z0 = phi_F2(z0);
514 double aF20 = PfacF2z0 * phiF2z0 * 2. * F1_BGL(0.) / (
MM *
MM -
MV *
MV) - aF21 * z0 - aF22 * z0*z0;
515 return (aF20 + aF21 * z + aF22 * z * z) / phiF2 / PfacF2;
518 double MVlnu::hA1(
double q2)
520 double w =
w0 - q2 / (2. *
MM *
MV);
521 double z = (
sqrt(w + 1.) - M_SQRT2) / (
sqrt(w + 1.) + M_SQRT2);
522 if (
CLNflag)
return hA1w1 * N_A * (1. - j_A * 8. * rho2 * z + k_A * (53. * rho2 - 15.) * z * z - l_A * (231. * rho2 - 91.) * z * z * z);
523 else return f_BGL(q2) /
sqrt(
MM *
MV) / (1. + w);
526 double MVlnu::R1(
double q2)
528 double w =
w0 - q2 / (2. *
MM *
MV);
529 return N_1 * R1w1 - j_1 * 0.12 * (w - 1.) + k_1 * 0.05 * (w - 1.)*(w - 1.);
532 double MVlnu::R2(
double q2)
534 double w =
w0 - q2 / (2. *
MM *
MV);
535 return N_2 * R2w1 + j_2 * 0.11 * (w - 1.) - k_2 * 0.06 * (w - 1.)*(w - 1.);
538 double MVlnu::R0(
double q2)
540 double w =
w0 - q2 / (2. *
MM *
MV);
542 double R2q2at0 = R2(0.);
543 double R0q2at0 = (
MM +
MV - (
MM -
MV) * R2q2at0) / (2. *
MV);
545 double R0w1 = R0q2at0 + j_0 * 0.11 * (
w0 - 1.) - k_0 * 0.01 * (
w0 - 1.)*(
w0 - 1.);
547 return R0w1 - j_0 * 0.11 * (w - 1.) + k_0 * 0.01 * (w - 1.)*(w - 1.);
550 double MVlnu::V(
double q2)
552 if (
CLNflag)
return R1(q2) /
RV * hA1(q2);
553 else return (
MM +
MV) * g_BGL(q2) / 2.;
556 double MVlnu::A0(
double q2)
558 double w =
w0 - q2 / (2. *
MM *
MV);
559 if (
CLNflag)
return R0(q2) /
RV * hA1(q2);
560 else return F2_BGL(q2) /
RV / (1. + w);
563 double MVlnu::A1(
double q2)
565 double w =
w0 - q2 / (2. *
MM *
MV);
566 return (w + 1.)*
RV / 2. * hA1(q2);
569 double MVlnu::A2(
double q2)
571 double w =
w0 - q2 / (2. *
MM *
MV);
572 if (
CLNflag)
return R2(q2) /
RV * hA1(q2);
573 else return (
MM +
MV) / 2. / (w * w - 1.) /
MM /
MV * ((w -
MV_o_MM) * f_BGL(q2) - F1_BGL(q2) /
MM);
576 double MVlnu::T1(
double q2)
578 double delta_T1 = 0.;
579 return (
Mb +
Mc) / (
MM +
MV) * V(q2)*(1. + delta_T1);
582 double MVlnu::T2(
double q2)
584 double delta_T2 = 0.;
585 return (
Mb -
Mc) / (
MM -
MV) * A1(q2)*(1. + delta_T2);
// Helicity form factor A12(q2).
// NOTE(review): the body (original lines 589-593) is missing from this copy
// of the file — recover it from version control before building.
588 double MVlnu::A12(
double q2)
// Tensor form factor T23(q2); delta_T23 is a placeholder correction.
// NOTE(review): the beginning of the return expression (original line 598)
// is missing from this copy of the file — recover before building.
595 double MVlnu::T23(
double q2)
597 double delta_T23 = 0.;
599 8 *
MM *
MV * (
MV *
MV -
MM *
MM) * A12(q2)) / (4. *
MM * (
MV -
MM) *
MV * q2)*(1. + delta_T23);
613 + (
Mb / q2)*((C7 + C7p) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(C7 - C7p)*(
MM *
MM -
MV *
MV) * T2(q2)));
619 + (
Mb / q2)*(-(C7 + C7p) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(C7 - C7p)*(
MM *
MM -
MV *
MV) * T2(q2)));
649 return 2. * M_SQRT2 * (
MM *
MV) / (
MM +
MV)*(CT + CTp) * T23(q2);
654 return 2. * (
MM *
MV) / (
MM +
MV)*(CT - CTp) * T23(q2);
659 return ((CT - CTp) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(CT + CTp)*(
MM *
MM -
MV *
MV) * T2(q2)) / (
sqrt(2. * q2));
664 return ((CT + CTp) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(CT - CTp)*(
MM *
MM -
MV *
MV) * T2(q2)) / (
sqrt(2. * q2));
669 return (-(CT - CTp) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(CT + CTp)*(
MM *
MM -
MV *
MV) * T2(q2)) / (
sqrt(2. * q2));
674 return (-(CT + CTp) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(CT - CTp)*(
MM *
MM -
MV *
MV) * T2(q2)) / (
sqrt(2. * q2));
682 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
684 double lambda_lep2 = lambda_lep*lambda_lep;
685 double Elep =
sqrt(
Mlep *
Mlep + lambda_lep2 / (4. * q2));
686 double Enu =
sqrt(
Mnu *
Mnu + lambda_lep2 / (4. * q2));
687 double Gprefactor = lambda_MM * lambda_lep / q2;
689 return Gprefactor * (4. / 9. * (3. * Elep * Enu + lambda_lep2 / (4. * q2))*(HVp(q2).abs2() + HVm(q2).abs2() + HV0(q2).abs2() + HAp(q2).abs2() + HAm(q2).abs2() + HA0(q2).abs2()) +
690 4. *
Mlep *
Mnu / 3. * (HVp(q2).abs2() + HVm(q2).abs2() + HV0(q2).abs2() - HAp(q2).abs2() - HAm(q2).abs2() - HA0(q2).abs2()) +
691 4. / 3. * ((Elep * Enu -
Mlep *
Mnu + lambda_lep2 / (4. * q2)) * HS(q2).abs2()+(Elep * Enu +
Mlep *
Mnu + lambda_lep2 / (4. * q2)) * HP(q2).abs2()) +
692 16. / 9. * (3. * (Elep * Enu +
Mlep *
Mnu) - lambda_lep2 / (4. * q2))*(HTpt(q2).abs2() + HTmt(q2).abs2() + HT0t(q2).abs2()) +
693 8. / 9. * (3. * (Elep * Enu -
Mlep *
Mnu) - lambda_lep2 / (4. * q2))*(HTpt(q2).abs2() + HTmt(q2).abs2() + HT0t(q2).abs2()) +
694 16. / 3. * (
Mlep * Enu +
Mnu * Elep)*(HVp(q2) * HTpt(q2).conjugate() + HVm(q2) * HTmt(q2).conjugate() + HV0(q2) * HT0t(q2).conjugate()).imag() +
695 8. * M_SQRT2 / 3. * (
Mlep * Enu -
Mnu * Elep)*(HAp(q2) * HTp(q2).conjugate() + HAm(q2) * HTm(q2).conjugate() + HA0(q2) * HT0(q2).conjugate()).imag());
700 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
702 double Gprefactor = lambda_MM * lambda_lep / q2;
704 return Gprefactor * 4. / 3. * lambda_lep * ((HVp(q2) * HAp(q2).conjugate() - HVm(q2) * HAm(q2).conjugate()).real()+
705 (2. * M_SQRT2) / q2 * (
Mlep *
Mlep -
Mnu *
Mnu)*(HTp(q2) * HTpt(q2).conjugate() - HTm(q2) * HTmt(q2).conjugate()).real() +
706 2. * (
Mlep +
Mnu) /
sqrt(q2)*(HAp(q2) * HTpt(q2).conjugate() - HAm(q2) * HTmt(q2).conjugate()).imag() +
707 M_SQRT2 * (
Mlep -
Mnu) /
sqrt(q2)*(HVp(q2) * HTp(q2).conjugate() - HVm(q2) * HTm(q2).conjugate()).imag()-
708 (
Mlep -
Mnu) /
sqrt(q2)*(HA0(q2) * HP(q2).conjugate()).real()-(
Mlep +
Mnu) /
sqrt(q2)*(HV0(q2) * HS(q2).conjugate()).real()+
709 (M_SQRT2 * HT0(q2) * HP(q2).conjugate() + 2. * HT0t(q2) * HS(q2).conjugate()).imag());
715 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
717 double lambda_lep2 = lambda_lep*lambda_lep;
718 double Gprefactor = lambda_MM * lambda_lep / q2;
720 return -Gprefactor * 2. / 9. * lambda_lep2 / q2 * ((-HVp(q2).abs2() - HVm(q2).abs2() + 2. * HV0(q2).abs2() - HAp(q2).abs2() - HAm(q2).abs2() + 2. * HA0(q2).abs2()) -
721 2. * (2. * HT0(q2).abs2() - HTp(q2).abs2() - HTm(q2).abs2()) - 4. * (2. * HT0t(q2).abs2() - HTpt(q2).abs2() - HTmt(q2).abs2()));
726 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
728 double lambda_lep2 = lambda_lep*lambda_lep;
729 double Elep =
sqrt(
Mlep *
Mlep + lambda_lep2 / (4. * q2));
730 double Enu =
sqrt(
Mnu *
Mnu + lambda_lep2 / (4. * q2));
731 double Gprefactor = lambda_MM * lambda_lep / q2;
733 return Gprefactor * (-4. / 9. * (3. * Elep * Enu + lambda_lep2 / (4. * q2))*(HVp(q2).abs2() + HVm(q2).abs2() - 2. * HV0(q2).abs2() + HAp(q2).abs2() + HAm(q2).abs2() - 2. * HA0(q2).abs2()) -
734 4. / 3. *
Mlep *
Mnu * (HVp(q2).abs2() + HVm(q2).abs2() - 2. * HV0(q2).abs2() - HAp(q2).abs2() - HAm(q2).abs2() + 2. * HA0(q2).abs2()) +
735 8. / 3. * (Elep * Enu -
Mlep *
Mnu + lambda_lep2 / (4. * q2)) * HS(q2).abs2() + 8. / 3. * (Elep * Enu +
Mlep *
Mnu + lambda_lep2 / (4. * q2)) * HP(q2).abs2() -
736 16. / 9. * (3. * (Elep * Enu +
Mlep *
Mnu) - lambda_lep2 / (4. * q2))*(HTpt(q2).abs2() + HTmt(q2).abs2() - 2. * HT0t(q2).abs2()) - 8. / 9. * (3. * (Elep * Enu -
Mlep *
Mnu) - lambda_lep2 / (4. * q2))*
737 (HTp(q2).abs2() + HTm(q2).abs2() - 2 * HT0(q2).abs2()) - 16. / 3. * (
Mlep * Enu +
Mnu * Elep)*(HVp(q2) * HTpt(q2).conjugate() + HVm(q2) * HTmt(q2).conjugate() - 2. * HV0(q2) * HT0t(q2).conjugate()).imag() -
738 8. * M_SQRT2 / 3. * (
Mlep * Enu -
Mnu * Elep)*(HAp(q2) * HTp(q2).conjugate() + HAm(q2) * HTm(q2).conjugate() - 2. * HA0(q2) * HT0(q2).conjugate()).imag());
743 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
745 double Gprefactor = lambda_MM * lambda_lep / q2;
747 return -Gprefactor * 4. * lambda_lep / 3. * ((HVp(q2) * HAp(q2).conjugate() - HVm(q2) * HAm(q2).conjugate()).real() +
748 2. * M_SQRT2 * (
Mlep *
Mlep -
Mnu *
Mnu) / q2 * (HTp(q2) * HTpt(q2).conjugate() - HTm(q2) * HTmt(q2).conjugate()).real() +
749 2. * (
Mlep +
Mnu) /
sqrt(q2)*(HAp(q2) * HTpt(q2).conjugate() - HAm(q2) * HTmt(q2).conjugate()).imag() +
750 M_SQRT2 * (
Mlep -
Mnu) /
sqrt(q2)*(HVp(q2) * HTp(q2).conjugate() - HVm(q2) * HTm(q2).conjugate()).imag() +
751 2. * (
Mlep -
Mnu) /
sqrt(q2)*(HA0(q2) * HP(q2).conjugate()).real() + 2. * (
Mlep +
Mnu) /
sqrt(q2)*(HV0(q2) * HS(q2).conjugate()).real() -
752 2. * (M_SQRT2 * HT0(q2) * HP(q2).conjugate() + 2. * HT0t(q2) * HS(q2).conjugate()).imag());
757 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
759 double lambda_lep2 = lambda_lep*lambda_lep;
760 double Gprefactor = lambda_MM * lambda_lep / q2;
762 return -Gprefactor * 2. / 9. * lambda_lep2 / q2 * (HVp(q2).abs2() + HVm(q2).abs2() + 4. * HV0(q2).abs2() + HAp(q2).abs2() +
763 HAm(q2).abs2() + 4. * HA0(q2).abs2() - 2. * (HTp(q2).abs2() + HTm(q2).abs2() + 4. * HT0(q2).abs2()) -
764 4. * (HTpt(q2).abs2() + HTmt(q2).abs2() + 4. * HT0t(q2).abs2()));
769 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
771 double Gprefactor = lambda_MM * lambda_lep / q2;
773 return Gprefactor * 4. /
sqrt(3.) * lambda_lep * (HVp(q2) * HA0(q2).conjugate() + HAp(q2) * HV0(q2).conjugate() -
774 HV0(q2) * HAm(q2).conjugate() - HA0(q2) * HVm(q2).conjugate()+(
Mlep +
Mnu) /
sqrt(q2)*(HVp(q2) * HS(q2).conjugate() + HS(q2) * HVm(q2).conjugate()) -
775 gslpp::complex::i() * M_SQRT2 * (HP(q2) * HTm(q2).conjugate() - HTp(q2) * HP(q2).conjugate() + M_SQRT2 * (HS(q2) * HTmt(q2).conjugate() - HTpt(q2) * HS(q2).conjugate()))+
776 (
Mlep -
Mnu) /
sqrt(q2)*(HAp(q2) * HP(q2).conjugate() + HP(q2) * HAm(q2).conjugate()) -
777 gslpp::complex::i()*2. * (
Mlep +
Mnu) /
sqrt(q2)*(HAp(q2) * HT0t(q2).conjugate() + HT0t(q2) * HAm(q2).conjugate() - HTpt(q2) * HA0(q2).conjugate() - HA0(q2) * HTmt(q2).conjugate()) -
778 gslpp::complex::i() * M_SQRT2 * (
Mlep -
Mnu) /
sqrt(q2)*(HVp(q2) * HT0(q2).conjugate() + HT0(q2) * HVm(q2).conjugate() - HTp(q2) * HV0(q2).conjugate() - HV0(q2) * HTm(q2).conjugate()) +
779 2. * M_SQRT2 * (
Mlep *
Mlep -
Mnu *
Mnu) / q2 * (HTp(q2) * HT0t(q2).conjugate() + HTpt(q2) * HT0(q2).conjugate() - HT0(q2) * HTmt(q2).conjugate() - HT0t(q2) * HTm(q2).conjugate()));
784 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
786 double lambda_lep2 = lambda_lep*lambda_lep;
787 double Gprefactor = lambda_MM * lambda_lep / q2;
789 return Gprefactor * 4. / 3. * lambda_lep2 / q2 * (HVp(q2) * HV0(q2).conjugate() + HV0(q2) * HVm(q2).conjugate() +
790 HAp(q2) * HA0(q2).conjugate() + HA0(q2) * HAm(q2).conjugate() - 2. * (HTp(q2) * HT0(q2).conjugate() +
791 HT0(q2) * HTm(q2).conjugate() + 2. * (HTpt(q2) * HT0t(q2).conjugate() + HT0t(q2) * HTmt(q2).conjugate())));
796 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
798 double lambda_lep2 = lambda_lep*lambda_lep;
799 double Gprefactor = lambda_MM * lambda_lep / q2;
801 return -Gprefactor * 8. / 3. * lambda_lep2 / q2 * (HVp(q2) * HVm(q2).conjugate() + HAp(q2) * HAm(q2).conjugate() -
802 2. * (HTp(q2) * HTm(q2).conjugate() + 2. * HTpt(q2) * HTmt(q2).conjugate()));
808 double MVlnu::J1s(
double q2)
811 return amplsq_factor * (8. * G000(q2) + 2. * G020(q2) - 4. * G200(q2) - G220(q2)).real() / 3.;
814 double MVlnu::J1c(
double q2)
817 return amplsq_factor * (8. * G000(q2) + 2. * G020(q2) + 8. * G200(q2) + 2. * G220(q2)).real() / 3.;
820 double MVlnu::J2s(
double q2)
823 return amplsq_factor * (2. * G020(q2) - G220(q2)).real();
826 double MVlnu::J2c(
double q2)
829 return amplsq_factor * (2. * (G020(q2) + G220(q2))).real();
832 double MVlnu::J3(
double q2)
835 return amplsq_factor * (G222(q2).real());
838 double MVlnu::J4(
double q2)
841 return -amplsq_factor * (G221(q2).real());
844 double MVlnu::J5(
double q2)
847 return amplsq_factor * (2. * G211(q2).real() /
sqrt(3.));
850 double MVlnu::J6s(
double q2)
853 return -amplsq_factor * (4. * (2. * G010(q2) - G210(q2)).real() / 3.);
856 double MVlnu::J6c(
double q2)
859 if (q2 > (
MM -
MV)*(
MM -
MV))
return 0.;
860 return -amplsq_factor * (8. * (G010(q2) + G210(q2)).real() / 3.);
863 double MVlnu::J7(
double q2)
866 return -amplsq_factor * (2. *
sqrt(3.)*(G211(q2).imag()) / 3.);
869 double MVlnu::J8(
double q2)
872 return amplsq_factor * (G221(q2).imag());
875 double MVlnu::J9(
double q2)
878 return -amplsq_factor * (G222(q2).imag());
884 double MVlnu::integrateJ(
int i,
double q2_min,
double q2_max)
886 old_handler = gsl_set_error_handler_off();
890 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ1s_tau;
891 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ1s_mu;
892 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ1s_el;
894 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
895 gsl_set_error_handler(old_handler);
899 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ1c_tau;
900 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ1c_mu;
901 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ1c_el;
903 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
904 gsl_set_error_handler(old_handler);
908 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ2s_tau;
909 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ2s_mu;
910 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ2s_el;
912 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
913 gsl_set_error_handler(old_handler);
917 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ2c_tau;
918 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ2c_mu;
919 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ2c_el;
921 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
922 gsl_set_error_handler(old_handler);
926 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ3_tau;
927 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ3_mu;
928 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ3_el;
930 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
931 gsl_set_error_handler(old_handler);
932 gsl_set_error_handler(old_handler);
936 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ4_tau;
937 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ4_mu;
938 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ4_el;
940 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
941 gsl_set_error_handler(old_handler);
945 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ5_tau;
946 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ5_mu;
947 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ5_el;
949 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
950 gsl_set_error_handler(old_handler);
954 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ6s_tau;
955 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ6s_mu;
956 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ6s_el;
958 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
959 gsl_set_error_handler(old_handler);
963 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ6c_mu;
964 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ6c_mu;
965 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ6c_el;
967 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
971 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ7_tau;
972 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ7_mu;
973 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ7_el;
975 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
976 gsl_set_error_handler(old_handler);
980 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ8_tau;
981 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ8_mu;
982 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ8_el;
984 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
985 gsl_set_error_handler(old_handler);
989 if (
lep ==
StandardModel::TAU)
if ((checkcache_int_tau == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ9_tau;
990 if (
lep ==
StandardModel::MU)
if ((checkcache_int_mu == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ9_mu;
991 if (
lep ==
StandardModel::ELECTRON)
if ((checkcache_int_el == 1) && (q2_min == q2min) && (q2_max == q2max))
return cached_intJ9_el;
993 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
994 gsl_set_error_handler(old_handler);
998 gsl_set_error_handler(old_handler);
999 std::stringstream out;
1001 throw std::runtime_error(
"MVlnu::integrateJ: index " + out.str() +
" not implemented");
1005 double MVlnu::dGammadw(
double q2)
1009 return 3. / 4. * (2. * J1s(q2) + J1c(q2)) - 1. / 4. * (2. * J2s(q2) + J2c(q2));
1016 double q2_min = (2. *
MM *
MV)*(
w0 - w_max);
1017 double q2_max = (2. *
MM *
MV)*(
w0 - w_min);
1019 double intJ1s = integrateJ(1, q2_min, q2_max);
1020 double intJ1c = integrateJ(2, q2_min, q2_max);
1021 double intJ2s = integrateJ(3, q2_min, q2_max);
1022 double intJ2c = integrateJ(4, q2_min, q2_max);
1024 return 3. / 4. * (2. * intJ1s + intJ1c) - 1. / 4. * (2. * intJ2s + intJ2c);
1027 double MVlnu::dGammadcldq2(
double q2,
double cl)
1031 return 3. / 8. * ((J1s(q2) + 2. * J1c(q2)) + cl * (J6s(q2) + 2. * J6c(q2))+(2. * cl * cl - 1.)*(J2s(q2) + 2. * J2c(q2)));
1034 double MVlnu::dGammadcl(
double cl)
1038 double intJ1s = integrateJ(1, q2min, q2max);
1039 double intJ1c = integrateJ(2, q2min, q2max);
1040 double intJ2s = integrateJ(3, q2min, q2max);
1041 double intJ2c = integrateJ(4, q2min, q2max);
1042 double intJ6s = integrateJ(8, q2min, q2max);
1043 double intJ6c = integrateJ(9, q2min, q2max);
1045 return 3. / 8. * ((intJ1s + 2. * intJ1c) + cl * (intJ6s + 2. * intJ6c)+(2. * cl * cl - 1.)*(intJ2s + 2. * intJ2c));
1052 double intJ1s = integrateJ(1, q2min, q2max);
1053 double intJ1c = integrateJ(2, q2min, q2max);
1054 double intJ2s = integrateJ(3, q2min, q2max);
1055 double intJ2c = integrateJ(4, q2min, q2max);
1056 double intJ6s = integrateJ(8, q2min, q2max);
1057 double intJ6c = integrateJ(9, q2min, q2max);
1059 return 3. / 8. * ((cl_max - cl_min)*(intJ1c + 2. * intJ1s)+
1060 (cl_max * cl_max - cl_min * cl_min) / 2. * (intJ6c + 2. * intJ6s)+
1061 (2. / 3. * (cl_max * cl_max * cl_max - cl_min * cl_min * cl_min)-(cl_max - cl_min))*(intJ2c + 2. * intJ2s));
1064 double MVlnu::dGammadcVdq2(
double q2,
double cl)
1068 return 3. / 8. * ((J1s(q2) + 2. * J1c(q2)) + cl * (J6s(q2) + 2. * J6c(q2))+(2. * cl * cl - 1.)*(J2s(q2) + 2. * J2c(q2)));
1071 double MVlnu::dGammadcV(
double cV)
1075 double intJ1s = integrateJ(1, q2min, q2max);
1076 double intJ1c = integrateJ(2, q2min, q2max);
1077 double intJ2s = integrateJ(3, q2min, q2max);
1078 double intJ2c = integrateJ(4, q2min, q2max);
1080 return 3. / 8. * (cV * cV * (3. * intJ1c - intJ2c)+(1. - cV * cV)*(3. * intJ1s - intJ2s));
1087 double intJ1s = integrateJ(1, q2min, q2max);
1088 double intJ1c = integrateJ(2, q2min, q2max);
1089 double intJ2s = integrateJ(3, q2min, q2max);
1090 double intJ2c = integrateJ(4, q2min, q2max);
1092 return 3. / 8. * ((cV_max * cV_max * cV_max - cV_min * cV_min * cV_min) / 3. * (3. * intJ1c - intJ2c)+
1093 ((cV_max - cV_min)-(cV_max * cV_max * cV_max - cV_min * cV_min * cV_min) / 3.)*(3. * intJ1s - intJ2s));
1096 double MVlnu::dGammadchidq2(
double q2,
double chi)
1100 return (3. * J1c(q2) + 6. * J1s(q2) - J2c(q2) - 2. * J2s(q2)) / 8. / M_PI +
1101 cos(2. * chi) / 2. / M_PI * J3(q2) +
sin(2. * chi) / 2. / M_PI * J9(q2);
1104 double MVlnu::dGammadchi(
double chi)
1108 double intJ1s = integrateJ(1, q2min, q2max);
1109 double intJ1c = integrateJ(2, q2min, q2max);
1110 double intJ2s = integrateJ(3, q2min, q2max);
1111 double intJ2c = integrateJ(4, q2min, q2max);
1112 double intJ3 = integrateJ(5, q2min, q2max);
1113 double intJ9 = integrateJ(12, q2min, q2max);
1115 return ((3. * intJ1c + 6. * intJ1s - intJ2c - 2. * intJ2s) / 4. +
1116 cos(2. * chi) * intJ3 +
sin(2. * chi) * intJ9) / 2. / M_PI;
1123 double intJ1s = integrateJ(1, q2min, q2max);
1124 double intJ1c = integrateJ(2, q2min, q2max);
1125 double intJ2s = integrateJ(3, q2min, q2max);
1126 double intJ2c = integrateJ(4, q2min, q2max);
1127 double intJ3 = integrateJ(5, q2min, q2max);
1128 double intJ9 = integrateJ(12, q2min, q2max);
1130 return ((chi_max - chi_min)*(3. * intJ1c + 6. * intJ1s - intJ2c - 2. * intJ2s) / 4. +
1131 (
sin(2. * chi_max) -
sin(2. * chi_min)) / 2. * intJ3 -
1132 (
cos(2. * chi_max) -
cos(2. * chi_min)) / 2. * intJ9) / (2. * M_PI);
1139 double intJ1s = integrateJ(1, q2min, q2max);
1140 double intJ1c = integrateJ(2, q2min, q2max);
1141 double intJ2s = integrateJ(3, q2min, q2max);
1142 double intJ2c = integrateJ(4, q2min, q2max);
1144 double DeltaJL = (3. * intJ1c - intJ2c) / 4.;
1145 double DeltaJ = 3. / 4. * (2. * intJ1s + intJ1c) - 1. / 4. * (2. * intJ2s + intJ2c);
1146 return DeltaJL/DeltaJ;
1154 return ag0 * ag0 + ag1 * ag1 + ag2*ag2;
1162 double aF10 = (
MM -
MV)*(phi_F1(0.) / phi_f(0.)) * af0;
1163 return af0 * af0 + af1 * af1 + af2 * af2 + aF10 * aF10 + aF11 * aF11 + aF12*aF12;
1170 double z0 = (
sqrt(
w0 + 1.) - M_SQRT2) / (
sqrt(
w0 + 1.) + M_SQRT2);
1171 double PfacF2z0 = (z0 - zP1) / (1. - z0 * zP1)*(z0 - zP2) / (1. - z0 * zP2)*(z0 - zP3) / (1. - z0 * zP3);
1172 double phiF2z0 = phi_F2(z0);
1173 double aF20 = PfacF2z0 * phiF2z0 * 2. * F1_BGL(0.) / (
MM *
MM -
MV *
MV) - aF21 * z0 - aF22 * z0*z0;
1174 return aF20 * aF20 + aF21 * aF21 + aF22*aF22;
1187 double q2 = (2. *
MM *
MV)*(
w0 - w);
1198 else return V(q2) *
RV / hA1(q2);
1207 else return A2(q2) *
RV / hA1(q2);
1213 double q2 = (2. *
MM *
MV)*(
w0 - w);
1216 else return A0(q2) *
RV / hA1(q2);
1224 double MVlnu::Hplus(
double q2){
1225 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1226 return (
MM+
MV)*A1(q2)-2.*
MM/(
MM+
MV)*abs_p*V(q2);
1229 double MVlnu::Hminus(
double q2){
1230 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1231 return (
MM+
MV)*A1(q2)+2.*
MM/(
MM+
MV)*abs_p*V(q2);
// Longitudinal helicity amplitude H0(q2).
// NOTE(review): the return statement (original lines 1236-1238) is missing
// from this copy of the file — recover it from version control.
1234 double MVlnu::H0(
double q2){
1235 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1239 double MVlnu::H0t(
double q2){
1240 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1241 return 2.*
MM*abs_p*A0(q2)/
sqrt(q2);
// Differential rate dGamma+/dq2 from the helicity amplitudes.
// NOTE(review): the definition of `lep_factor` (original lines ~1245-1249)
// is missing from this copy of the file — recover before building.
1244 double MVlnu::dGpdq2(
double q2){
1248 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1250 return 2./3.*amplsq_factor*abs_p*q2*lep_factor*(Hplus(q2)*Hplus(q2)+Hminus(q2)*Hminus(q2)+H0(q2)*H0(q2)
1251 +3.*H0t(q2)*H0t(q2));
// Differential rate dGamma-/dq2 from the helicity amplitudes (no H0t term).
// NOTE(review): the definition of `lep_factor` (original lines ~1255-1259)
// is missing from this copy of the file — recover before building.
1254 double MVlnu::dGmdq2(
double q2){
1258 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1260 return 2./3.*amplsq_factor*abs_p*q2*lep_factor*(Hplus(q2)*Hplus(q2)+Hminus(q2)*Hminus(q2)+H0(q2)*H0(q2));
1263 double MVlnu::integrateGpm(
int i,
double q2_min,
double q2_max)
1265 old_handler = gsl_set_error_handler_off();
1270 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
1271 gsl_set_error_handler(old_handler);
1276 if (gsl_integration_cquad(&FJ, q2_min, q2_max, 1.e-2, 1.e-1, w_J, &J_res, &J_err, NULL) != 0) std::numeric_limits<double>::quiet_NaN();
1277 gsl_set_error_handler(old_handler);
1281 gsl_set_error_handler(old_handler);
1282 std::stringstream out;
1284 throw std::runtime_error(
"MVlnu::integrateGpm: index " + out.str() +
" not implemented");
1292 double DeltaGammaPlus = integrateGpm(1,q2min,q2max);
1293 double DeltaGammaMinus = integrateGpm(2,q2min,q2max);
1295 return (DeltaGammaPlus-DeltaGammaMinus)/(DeltaGammaPlus+DeltaGammaMinus);