/* Formula Evaluation, Integer vs. Floating Point */
#include <stdio.h>

// Demonstrates how C evaluates arithmetic expressions: operator precedence,
// integer vs. floating-point division, the remainder operator %, and
// explicit type conversion (casts). Intentionally includes "surprising"
// expressions to show common pitfalls; do not "fix" them.
int main()
{
int     ia=2,   ib=3,   ic=4,   i;   // integer operands and an integer result
double  da=2.0, db=3.0, dc=4.0, d;   // floating-point operands and a double result

printf("integer numbers: \n");
i = ia+ib*ic;   printf("%d\n", i);   // * binds tighter than +: 2 + (3*4) = 14
i = ia+(ib*ic); printf("%d\n", i);   // same as above; parentheses are redundant
i = (ia+ib)*ic; printf("%d\n", i);   // parentheses override precedence: (2+3)*4 = 20
i = ia/ib;      printf("%d\n", i); // surprise: integer division truncates, 2/3 = 0
d = ia/ib;      printf("%lf\n", d); // still 0.0 - the division happens in int first,
                                    // only the truncated result is converted to double

printf("floating point numbers:\n");
d = da+db*dc;   printf("%lf\n", d);  // 2.0 + 12.0 = 14.0
d = da+(db*dc); printf("%lf\n", d);  // same, parentheses redundant
d = (da+db)*dc; printf("%lf\n", d);  // 5.0 * 4.0 = 20.0
d = da/db;      printf("%lf\n", d);  // true division: 2.0/3.0 = 0.666667

printf("\"new\" features:\n");
i = 12 % 5;     printf("%d\n", i); // % is the remainder operator, not a calculator's percent: 12 % 5 = 2
i = ic % ib;    printf("%d\n", i); // 4 % 3 = 1
//  d = dc % db;    printf("%d\n", i); // % requires integer operands; it does not
//                                     // compile for doubles - use fmod() from <math.h> instead

printf("integer vs. floating point operation:\n");
d = ia/ib;      printf("%lf\n", d);  // int / int    -> truncated to 0, then 0.0
d = da/db;      printf("%lf\n", d);  // double / double -> 0.666667
d = ia/db;      printf("%lf\n", d);  // mixed: int is promoted to double -> 0.666667
d = da/ib;      printf("%lf\n", d);  // mixed: same promotion rule -> 0.666667

printf("integer vs. floating point operation:\n");
d = ia*ib/ic;   printf("%lf\n", d);  // all int, left to right: (2*3)/4 = 1, then 1.0
d = (ia*ib)/ic; printf("%lf\n", d);  // same as above made explicit
d = ia*(ib/ic); printf("%lf\n", d);  // 3/4 truncates to 0 first, so 2*0 = 0, then 0.0
d = da*ib/ic;   printf("%lf\n", d);  // da*ib is double (6.0), so 6.0/4 = 1.5
d = da*(ib/ic); printf("%lf\n", d);  // int division done first: 2.0 * 0 = 0.0

printf("type conversion:\n");
d = ((double)ia)*ib/ic; printf("%lf\n", d); // cast forces floating-point arithmetic: 2.0*3/4 = 1.5
d = (double)ia*ib/ic;   printf("%lf\n", d); // cast applies to the next operand only, so () not required here

i = ia/db;          printf("%d\n", i);  // generates a compiler warning: double truncated to int
// warning - the compiler suspects an error, but technically we could have
//           done it on purpose and it is possible to translate this
//           suspicious notation; better to avoid this kind of notation
i = (int)(ia/db);   printf("%d\n", i);
// ... the explicit cast tells the compiler we really meant to truncate,
//     so there is no warning

return 0;
}
