-/*
- * Sending broadcast IPIs causes trouble with CPU hotplug: broadcast IPIs
- * are delivered to CPUs irrespective of their offline status and could
- * pick up stale intr data when these CPUS are turned online.
- *
- * Not using broadcast is a cleaner approach IMO, but Andi Kleen disagrees with
- * the idea of not using broadcast IPI's anymore. Hence the run time check
- * is introduced, on his request so we can choose an alternate mechanism.
- *
- * Initial wacky performance tests that collect cycle counts show
- * no increase in using mask v.s broadcast version. In fact they seem
- * identical in terms of cycle counts.
- *
- * if we need to use broadcast, we need to do the following.
- *
- * cli;
- * hold call_lock;
- * clear any pending IPI, just ack and clear all pending intr
- * set cpu_online_map;
- * release call_lock;
- * sti;
- *
- * The complicated dummy irq processing shown above is not required if
- * we didnt sent IPI's to wrong CPU's in the first place.
- *
- * - Ashok Raj <ashok.raj@intel.com>
- */
-#ifdef CONFIG_HOTPLUG_CPU
-#define DEFAULT_SEND_IPI	(1)
-#else
-#define DEFAULT_SEND_IPI	(0)
-#endif
-
-static int no_broadcast=DEFAULT_SEND_IPI;
-
 static cpumask_t flat_target_cpus(void)
 {
 	return cpu_online_map;
@@ -119,37 +76,15 @@ static void flat_send_IPI_mask(cpumask_t
 	local_irq_restore(flags);
 }
 
-static inline void __local_flat_send_IPI_allbutself(int vector)
-{
-	if (no_broadcast) {
-		cpumask_t mask = cpu_online_map;
-		int this_cpu = get_cpu();
-
-		cpu_clear(this_cpu, mask);
-		flat_send_IPI_mask(mask, vector);
-		put_cpu();
-	}
-	else
-		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
-}
-
-static inline void __local_flat_send_IPI_all(int vector)
-{
-	if (no_broadcast)
-		flat_send_IPI_mask(cpu_online_map, vector);
-	else
-		__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
-}
-
 static void flat_send_IPI_allbutself(int vector)
 {
 	if (((num_online_cpus()) - 1) >= 1)
-		__local_flat_send_IPI_allbutself(vector);
+		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
 }
 
 static void flat_send_IPI_all(int vector)
 {
-	__local_flat_send_IPI_all(vector);
+	__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
 }
 
 static int flat_apic_id_registered(void)
@@ -170,16 +105,6 @@ static unsigned int phys_pkg_id(int inde
 	return ((ebx >> 24) & 0xFF) >> index_msb;
 }
 
-static __init int no_ipi_broadcast(char *str)
-{
-	get_option(&str, &no_broadcast);
-	printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
-											"IPI Broadcast");
-	return 1;
-}
-
-__setup("no_ipi_broadcast", no_ipi_broadcast);
-
 struct genapic apic_flat =  {
 	.name = "flat",
 	.int_delivery_mode = dest_LowestPrio,
@@ -194,12 +119,3 @@ struct genapic apic_flat =  {
 	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
 };
-
-static int __init print_ipi_mode(void)
-{
-	printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
-											"Shortcut");
-	return 0;
-}
-
-late_initcall(print_ipi_mode);
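
For readers who have not followed the no_broadcast discussion, the sketch below models, in user space, the two strategies the deleted __local_flat_send_IPI_allbutself() chose between: targeted IPIs sent only to online CPUs minus the sender, versus the APIC ALLBUT shortcut that the patch now uses unconditionally. This is an illustration only: cpumask_t is modelled as a 64-bit bitmap, and the send_ipi_* helpers are hypothetical print stubs, not kernel APIs.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t cpumask_t;   /* bit N set => CPU N is online */

/* Hypothetical stand-in for flat_send_IPI_mask(): report each target CPU. */
static void send_ipi_mask(cpumask_t mask, int vector)
{
	for (int cpu = 0; cpu < 64; cpu++)
		if (mask & (1ULL << cpu))
			printf("IPI 0x%x -> cpu %d\n", vector, cpu);
}

/* Hypothetical stand-in for __send_IPI_shortcut(APIC_DEST_ALLBUT, ...). */
static void send_ipi_allbut_shortcut(int vector)
{
	printf("IPI 0x%x -> ALLBUT shortcut (every CPU but the sender, online or not)\n",
	       vector);
}

/* Model of the removed __local_flat_send_IPI_allbutself(). */
static void send_ipi_allbutself(cpumask_t online, int this_cpu,
				int no_broadcast, int vector)
{
	if (no_broadcast) {
		/* cpu_clear(this_cpu, mask) equivalent on the bitmap model */
		cpumask_t mask = online & ~(1ULL << this_cpu);
		send_ipi_mask(mask, vector);
	} else {
		send_ipi_allbut_shortcut(vector);
	}
}

int main(void)
{
	cpumask_t online = 0xbULL;   /* CPUs 0, 1 and 3 online; CPU 2 offline */

	send_ipi_allbutself(online, 0, 1, 0xfd);  /* mask path: CPUs 1 and 3 only */
	send_ipi_allbutself(online, 0, 0, 0xfd);  /* shortcut path: broadcast     */
	return 0;
}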
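
The removed comment also sketches the dummy-interrupt handling a CPU would need while being onlined if broadcast shortcuts stay in use: with local interrupts disabled and call_lock held, acknowledge any IPIs latched while the CPU was offline, and only then set its bit in cpu_online_map. The following is a small, hypothetical user-space model of that ordering; cli/sti, call_lock and cpu_online_map are represented by stubs and a plain struct, not the real kernel facilities.

#include <stdio.h>
#include <stdint.h>

struct cpu_state {
	uint32_t pending_ipis;   /* vectors latched by broadcasts while offline */
	int      online;         /* this CPU's bit in cpu_online_map            */
};

/* Stubs that only document ordering; the real sequence would disable local
 * interrupts and take call_lock around the whole block. */
static void cli(void)              { }
static void sti(void)              { }
static void lock_call_lock(void)   { }
static void unlock_call_lock(void) { }

static void bring_cpu_online(struct cpu_state *cpu)
{
	cli();
	lock_call_lock();

	/* Ack and discard anything a broadcast latched while we were offline,
	 * so it is never serviced as a real interrupt after onlining. */
	if (cpu->pending_ipis) {
		printf("acking stale IPIs 0x%x before going online\n",
		       cpu->pending_ipis);
		cpu->pending_ipis = 0;
	}

	cpu->online = 1;   /* only now would cpu_online_map be updated */

	unlock_call_lock();
	sti();
}

int main(void)
{
	/* Pretend one vector was broadcast while this CPU was offline. */
	struct cpu_state cpu = { .pending_ipis = 0x2, .online = 0 };

	bring_cpu_online(&cpu);
	printf("online=%d pending=0x%x\n", cpu.online, cpu.pending_ipis);
	return 0;
}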
