  1. /* balance.c
  2. * Balance IRQs.
  3. */
  4. #include <stdlib.h>
  5. #include <stdio.h>
  6. #include <string.h>
  7. #include <sys/types.h>
  8. #include <dirent.h>
  9. #include <limits.h>
  10. #include <ctype.h>
  11. #include "statistics.h"
  12. #include "cpu.h"
  13. #include "irq.h"
  14. #include "balance.h"
  15. /* Drop the dont_move flag on all IRQs for specified CPU */
  16. static int dec_weight(cpu_t *cpu, int value)
  17. {
  18. lub_list_node_t *iter;
  19. if (!cpu)
  20. return -1;
  21. if (value < 0)
  22. return -1;
  23. for (iter = lub_list_iterator_init(cpu->irqs); iter;
  24. iter = lub_list_iterator_next(iter)) {
  25. irq_t *irq;
  26. irq = (irq_t *)lub_list_node__get_data(iter);
  27. if (irq->weight >= value)
  28. irq->weight -= value;
  29. }
  30. return 0;
  31. }
  32. /* Remove IRQ from specified CPU */
  33. static int remove_irq_from_cpu(irq_t *irq, cpu_t *cpu)
  34. {
  35. lub_list_node_t *node;
  36. if (!irq || !cpu)
  37. return -1;
  38. irq->cpu = NULL;
  39. node = lub_list_search(cpu->irqs, irq);
  40. if (!node)
  41. return 0;
  42. lub_list_del(cpu->irqs, node);
  43. lub_list_node_free(node);
  44. return 0;
  45. }
  46. /* Move IRQ to specified CPU. Remove IRQ from the IRQ list
  47. of old CPU. */
  48. static int move_irq_to_cpu(irq_t *irq, cpu_t *cpu)
  49. {
  50. if (!irq || !cpu)
  51. return -1;
  52. if (irq->cpu) {
  53. cpu_t *old_cpu = irq->cpu;
  54. remove_irq_from_cpu(irq, old_cpu);
  55. dec_weight(old_cpu, 1);
  56. }
  57. dec_weight(cpu, 1);
  58. irq->cpu = cpu;
  59. lub_list_add(cpu->irqs, irq);
  60. return 0;
  61. }
  62. /* Search for the best CPU. Best CPU is a CPU with minimal load.
  63. If several CPUs have the same load then the best CPU is a CPU
  64. with minimal number of assigned IRQs */
  65. static cpu_t *choose_cpu(lub_list_t *cpus, cpumask_t *cpumask, float threshold)
  66. {
  67. lub_list_node_t *iter;
  68. lub_list_t * min_cpus = NULL;
  69. float min_load = 100.00;
  70. lub_list_node_t *node;
  71. cpu_t *cpu = NULL;
  72. for (iter = lub_list_iterator_init(cpus); iter;
  73. iter = lub_list_iterator_next(iter)) {
  74. cpu = (cpu_t *)lub_list_node__get_data(iter);
  75. if (!cpu_isset(cpu->id, *cpumask))
  76. continue;
  77. if (cpu->load >= threshold)
  78. continue;
  79. if ((!min_cpus) || (cpu->load < min_load)) {
  80. min_load = cpu->load;
  81. if (!min_cpus)
  82. min_cpus = lub_list_new(cpu_list_compare_len);
  83. while ((node = lub_list__get_tail(min_cpus))) {
  84. lub_list_del(min_cpus, node);
  85. lub_list_node_free(node);
  86. }
  87. lub_list_add(min_cpus, cpu);
  88. }
  89. if (cpu->load == min_load)
  90. lub_list_add(min_cpus, cpu);
  91. }
  92. if (!min_cpus)
  93. return NULL;
  94. node = lub_list__get_head(min_cpus);
  95. cpu = (cpu_t *)lub_list_node__get_data(node);
  96. while ((node = lub_list__get_tail(min_cpus))) {
  97. lub_list_del(min_cpus, node);
  98. lub_list_node_free(node);
  99. }
  100. lub_list_free(min_cpus);
  101. return cpu;
  102. }
  103. static int irq_set_affinity(irq_t *irq, cpumask_t *cpumask)
  104. {
  105. char path[PATH_MAX];
  106. char buf[NR_CPUS + 1];
  107. FILE *fd;
  108. if (!irq)
  109. return -1;
  110. snprintf(path, sizeof(path),
  111. "%s/%u/smp_affinity", PROC_IRQ, irq->irq);
  112. path[sizeof(path) - 1] = '\0';
  113. if (!(fd = fopen(path, "w")))
  114. return -1;
  115. cpumask_scnprintf(buf, sizeof(buf), *cpumask);
  116. buf[sizeof(buf) - 1] = '\0';
  117. if ((fprintf(fd, "%s\n", buf) < 0) || (fflush(fd) == EOF)) {
  118. /* The affinity for some IRQ can't be changed. So don't
  119. consider such IRQs. The example is IRQ 0 - timer.
  120. Blacklist this IRQ. Note fprintf() without fflush()
  121. will not return I/O error due to buffers. */
  122. irq->blacklisted = 1;
  123. remove_irq_from_cpu(irq, irq->cpu);
  124. printf("Blacklist IRQ %u\n", irq->irq);
  125. }
  126. fclose(fd);
  127. return 0;
  128. }
  129. /* Find best CPUs for IRQs need to be balanced. */
  130. int balance(lub_list_t *cpus, lub_list_t *balance_irqs, float threshold)
  131. {
  132. lub_list_node_t *iter;
  133. for (iter = lub_list_iterator_init(balance_irqs); iter;
  134. iter = lub_list_iterator_next(iter)) {
  135. irq_t *irq;
  136. cpu_t *cpu;
  137. irq = (irq_t *)lub_list_node__get_data(iter);
  138. /* Try to find local CPU to move IRQ to.
  139. The local CPU is CPU with native NUMA node. */
  140. cpu = choose_cpu(cpus, &(irq->local_cpus), threshold);
  141. /* If local CPU is not found then try to use
  142. CPU from another NUMA node. It's better then
  143. overloaded CPUs. */
  144. /* Non-local CPUs were disabled. It seems there is
  145. no advantages to use them. The all interactions will
  146. be held by QPI-like interfaces through local CPUs. */
  147. /* if (!cpu) {
  148. cpumask_t complement;
  149. cpus_complement(complement, irq->local_cpus);
  150. cpu = choose_cpu(cpus, &complement, threshold);
  151. }
  152. */
  153. if (cpu) {
  154. if (irq->cpu)
  155. printf("Move IRQ %u from CPU%u to CPU%u\n",
  156. irq->irq, irq->cpu->id, cpu->id);
  157. else
  158. printf("Move IRQ %u to CPU%u\n", irq->irq, cpu->id);
  159. move_irq_to_cpu(irq, cpu);
  160. }
  161. }
  162. return 0;
  163. }
  164. int apply_affinity(lub_list_t *balance_irqs)
  165. {
  166. lub_list_node_t *iter;
  167. for (iter = lub_list_iterator_init(balance_irqs); iter;
  168. iter = lub_list_iterator_next(iter)) {
  169. irq_t *irq;
  170. irq = (irq_t *)lub_list_node__get_data(iter);
  171. if (!irq->cpu)
  172. continue;
  173. irq_set_affinity(irq, &(irq->cpu->cpumask));
  174. }
  175. return 0;
  176. }
  177. /* Count the number of intr-not-null IRQs and minimal IRQ weight */
  178. static int irq_list_info(lub_list_t *irqs, int *min_weight,
  179. unsigned int *irq_num, unsigned int *candidates_num)
  180. {
  181. lub_list_node_t *iter;
  182. if (!irqs)
  183. return -1;
  184. if (min_weight)
  185. *min_weight = -1;
  186. if (irq_num)
  187. *irq_num = 0;
  188. if (candidates_num)
  189. *candidates_num = 0;
  190. for (iter = lub_list_iterator_init(irqs); iter;
  191. iter = lub_list_iterator_next(iter)) {
  192. irq_t *irq = (irq_t *)lub_list_node__get_data(iter);
  193. if (irq->intr == 0)
  194. continue;
  195. if (min_weight) {
  196. if ((*min_weight < 0) || (irq->weight < *min_weight))
  197. *min_weight = irq->weight;
  198. }
  199. if (irq_num)
  200. *irq_num += 1;
  201. if (irq->weight)
  202. continue;
  203. if (candidates_num)
  204. *candidates_num += 1;
  205. }
  206. return 0;
  207. }
  208. /* Search for most overloaded CPU */
  209. static cpu_t * most_overloaded_cpu(lub_list_t *cpus, float threshold)
  210. {
  211. lub_list_node_t *iter;
  212. cpu_t *overloaded_cpu = NULL;
  213. float max_load = 0.0;
  214. /* Search for the most overloaded CPU.
  215. The load must be greater than threshold. */
  216. for (iter = lub_list_iterator_init(cpus); iter;
  217. iter = lub_list_iterator_next(iter)) {
  218. cpu_t *cpu = (cpu_t *)lub_list_node__get_data(iter);
  219. int min_weight = -1;
  220. unsigned int irq_num = 0;
  221. if (cpu->load < threshold)
  222. continue;
  223. if (cpu->load <= max_load)
  224. continue;
  225. /* Don't move last IRQ */
  226. if (lub_list_len(cpu->irqs) <= 1)
  227. continue;
  228. irq_list_info(cpu->irqs, &min_weight, &irq_num, NULL);
  229. /* All IRQs has intr=0 */
  230. if (irq_num == 0)
  231. continue;
  232. if (min_weight > 0)
  233. dec_weight(cpu, min_weight);
  234. /* Ok, it's good CPU to try to free it */
  235. max_load = cpu->load;
  236. overloaded_cpu = cpu;
  237. }
  238. return overloaded_cpu;
  239. }
  240. /* Search for the overloaded CPUs and then choose best IRQ for moving to
  241. another CPU. The best IRQ is IRQ with maximum number of interrupts.
  242. The IRQs with small number of interrupts have very low load or very
  243. high load (in a case of NAPI). */
  244. int choose_irqs_to_move(lub_list_t *cpus, lub_list_t *balance_irqs,
  245. float threshold, birq_choose_strategy_e strategy)
  246. {
  247. lub_list_node_t *iter;
  248. cpu_t *overloaded_cpu = NULL;
  249. irq_t *irq_to_move = NULL;
  250. unsigned long long max_intr = 0;
  251. unsigned long long min_intr = (unsigned long long)(-1);
  252. unsigned int choose = 0;
  253. unsigned int current = 0;
  254. /* Search for overloaded CPUs */
  255. if (!(overloaded_cpu = most_overloaded_cpu(cpus, threshold)))
  256. return 0;
  257. if (strategy == BIRQ_CHOOSE_RND) {
  258. unsigned int candidates = 0;
  259. irq_list_info(overloaded_cpu->irqs, NULL, NULL, &candidates);
  260. if (candidates == 0)
  261. return 0;
  262. choose = rand() % candidates;
  263. }
  264. /* Search for the IRQ (owned by overloaded CPU) with
  265. maximum/minimum number of interrupts. */
  266. for (iter = lub_list_iterator_init(overloaded_cpu->irqs); iter;
  267. iter = lub_list_iterator_next(iter)) {
  268. irq_t *irq = (irq_t *)lub_list_node__get_data(iter);
  269. /* Don't move any IRQs with intr=0. It can be unused IRQ. In
  270. this case the moving is not needed. It can be overloaded
  271. (by NAPI) IRQs. In this case it will be not moved anyway. */
  272. if (irq->intr == 0)
  273. continue;
  274. if (irq->weight)
  275. continue;
  276. if (strategy == BIRQ_CHOOSE_MAX) {
  277. /* Get IRQ with max intr */
  278. if (irq->intr > max_intr) {
  279. max_intr = irq->intr;
  280. irq_to_move = irq;
  281. }
  282. } else if (strategy == BIRQ_CHOOSE_MIN) {
  283. /* Get IRQ with min intr */
  284. if (irq->intr < min_intr) {
  285. min_intr = irq->intr;
  286. irq_to_move = irq;
  287. }
  288. } else if (strategy == BIRQ_CHOOSE_RND) {
  289. if (current == choose) {
  290. irq_to_move = irq;
  291. break;
  292. }
  293. }
  294. current++;
  295. }
  296. if (irq_to_move) {
  297. /* Don't move this IRQ while next iteration. */
  298. irq_to_move->weight = 1;
  299. lub_list_add(balance_irqs, irq_to_move);
  300. }
  301. return 0;
  302. }