<?xml version="1.0" encoding="utf-8" ?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:syn="http://purl.org/rss/1.0/modules/syndication/" xmlns="http://purl.org/rss/1.0/">




    



<channel rdf:about="https://cis-india.org/search_rss">
  <title>Centre for Internet and Society</title>
  <link>https://cis-india.org</link>
  
  <description>
    
            These are the search results for the query, showing results 191 to 205.
        
  </description>
  
  
  
  
  <image rdf:resource="https://cis-india.org/logo.png"/>

  <items>
    <rdf:Seq>
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/privacy-in-the-digital-age-addressing-common-challenges-seizing-opportunities"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/indian-intermediary-liability-regime"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/epw-amber-sinha-may-18-2018-for-indias-data-protection-regime-to-be-efficient-policymakers-should-treat-privacy-as-a-social-good"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/hack-read-waqas-may-15-2018-indian-cricket-board-exposes-personal-data-of-thousands-of-players"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/the-wire-karan-saini-may-11-2018-aadhaar-remains-an-unending-security-nightmare-for-a-billion-indians"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/ozy-aayush-soni-may-11-2018-indias-national-id-project-brings-pain-to-those-it-aims-to-help"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/artificial-intelligence-for-growth-leveraging-ai-and-robotics-for-indias-economic-transformation"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/economic-times-may-2-2018-cyber-experts-say-playground-open-for-influencing-elections"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/ani-may-2-2018-data-usage-by-political-parties"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/hindustan-times-april-30-2018-prasun-sonwalkar-vidhi-choudhury-now-twitter-too-caught-up-in-cambridge-analytica-controversy"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/new-indian-express-april-26-2018-aadhaar-data-over-89-lakh-mnrega-workers-in-andhra-pradesh-leaked-online"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/pai-wg-labor-and-economy-meeting"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/asia-times-april-20-2018-aayush-rathi-sunil-abraham-what-s-up-with-whatsapp"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/artificial-intelligence-in-governance-a-report-of-the-roundtable-held-in-new-delhi"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/government-giving-free-publicity-worth-40-k-to-twitter-and-facebook"/>
        
    </rdf:Seq>
  </items>

</channel>


    <item rdf:about="https://cis-india.org/internet-governance/news/privacy-in-the-digital-age-addressing-common-challenges-seizing-opportunities">
    <title>Privacy in the Digital Age: Addressing Common Challenges, Seizing Opportunities</title>
    <link>https://cis-india.org/internet-governance/news/privacy-in-the-digital-age-addressing-common-challenges-seizing-opportunities</link>
    <description>
        &lt;b&gt;DG Justice and Consumers and European Union is organizing a conference on privacy in the digital age on May 25, 2018 in New Delhi.&lt;/b&gt;
        
&lt;h3 style="text-align: center;"&gt;&lt;img src="https://cis-india.org/home-images/copy_of_India_posterwall_20180517page001.jpg/@@images/bc1bb559-cf77-4518-b4d3-a367e5a2f04f.jpeg" alt="null" class="image-inline" title="India Poster Wall" /&gt;&lt;/h3&gt;
&lt;hr /&gt;
&lt;h3 style="text-align: justify;"&gt;Agenda&lt;/h3&gt;
&lt;p style="text-align: justify;"&gt;Friday 25 May 2018, Reception to follow, The Lalit Hotel, Barakhamba Avenue, Connaught Place, New Delhi, India&lt;/p&gt;
&lt;ul style="text-align: justify;"&gt;
&lt;li&gt;9:00 a.m. Registration and welcome coffee&lt;/li&gt;
&lt;li&gt;9:20 a.m. Welcome: Vera Jourova, EU Commissioner for Justice and Consumers (by video)&lt;/li&gt;
&lt;li&gt;9:30 a.m. Opening remarks: Justice B.N. Srikrishna, chair of the Committee of Experts on a Data Protection Framework for India &lt;br /&gt;Tomasz Kozlowski, Ambassador of the European Union to India&lt;/li&gt;&lt;/ul&gt;
&lt;p style="text-align: justify;"&gt;10:00 a.m. &lt;strong&gt;Panel 1 - Setting the scene: India at the crossroads&lt;/strong&gt;&lt;/p&gt;
&lt;ul style="text-align: justify;"&gt;
&lt;li&gt;Moderator: Sunil Abraham, Executive Director, Centre for Internet and Society, India&lt;br /&gt;Vinayak Godse, Senior Director, Data Protection, Data Security Council of India&amp;nbsp;&lt;br /&gt;Raman Jit Singh Chima, Policy Director, Access Now, India&lt;br /&gt;Amba Kak, Public Policy Advisor, Mozilla, India&lt;/li&gt;
&lt;li&gt;11:00 a.m.: Coffee break&lt;/li&gt;&lt;/ul&gt;
&lt;p style="text-align: justify;"&gt;11:15 a.m. &lt;strong&gt;Panel 2 - Modern data protection laws: towards global convergence&lt;/strong&gt;&lt;/p&gt;
&lt;ul style="text-align: justify;"&gt;
&lt;li&gt;Moderator: Clarisse Girot, Data Privacy Project Lead, Asian Business Law Institute, Singapore&lt;br /&gt;Ralf Sauer, Deputy Head of Unit, International data flows and protection, European Commission, Brussels &lt;br /&gt;Malavika Jayaram, Executive Director, Digital Asia Hub, Hong Kong&lt;br /&gt;Graham Greenleaf, Professor of Law &amp;amp; Information Systems, University of New South Wales, Australia (by video)&lt;/li&gt;&lt;/ul&gt;
&lt;p style="text-align: justify;"&gt;12:15 p.m. &lt;strong&gt;Panel 3 - Privacy and data security: a business opportunity&lt;/strong&gt;&lt;/p&gt;
&lt;ul style="text-align: justify;"&gt;
&lt;li&gt;Moderator: Ralf Sauer, Deputy Head of Unit,&amp;nbsp;International data flows and protection, European Commission, Brussels&lt;br /&gt;Srinivas Poorsarla, Vice President and Head (Global), Privacy and Data Protection, Infosys, India&lt;br /&gt;Ravi Sogi, Head - Product Security and Privacy, Philips&lt;br /&gt;Riccardo Masucci, Global Director of Privacy Policy, Intel, Washington DC&lt;/li&gt;&lt;/ul&gt;
&lt;p style="text-align: justify;"&gt;1:15 p.m.: Reception&lt;/p&gt;

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/privacy-in-the-digital-age-addressing-common-challenges-seizing-opportunities'&gt;https://cis-india.org/internet-governance/news/privacy-in-the-digital-age-addressing-common-challenges-seizing-opportunities&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-24T10:45:56Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/indian-intermediary-liability-regime">
    <title>Indian Intermediary Liability Regime: Compliance with the Manila Principles on Intermediary Liability</title>
    <link>https://cis-india.org/internet-governance/blog/indian-intermediary-liability-regime</link>
    <description>
        &lt;b&gt;This report assesses the compliance of the Indian intermediary liability framework with the Manila Principles on Intermediary Liability, and recommends substantive legislative changes to bring the legal framework in line with the Manila Principles. &lt;/b&gt;
        &lt;p&gt;&lt;span style="text-align: justify; "&gt;The report was edited by Elonnai Hickok and Swaraj Barooah&lt;/span&gt;&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;The report is an examination of Indian laws based upon the background paper to the Manila Principles as the explanatory text on which these recommendations have been based, and not an assessment of the principles themselves. To do this, the report considers the Indian regime in the context of each of the principles defined in the Manila Principles. As such, the explanatory text to the Manila Principles recognizes that diverse national and political scenario may require different intermediary liability legal regimes, however, this paper relies only on the best practices prescribed under the Manila Principles.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The report is divided into the following sections&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Principle I: Intermediaries should be shielded by law from liability for third-party content&lt;/li&gt;
&lt;li&gt;Principle II: Content must not be required to be restricted without an order by a judicial authority&lt;/li&gt;
&lt;li&gt;Principle III: Requests for restrictions of content must be clear, be unambiguous, and follow due process&lt;/li&gt;
&lt;li&gt;Principle IV: Laws and content restriction orders and practices must comply with the tests of necessity and proportionality&lt;/li&gt;
&lt;li&gt;
&lt;div id="_mcePaste"&gt;Principle V: Laws and content restriction policies and practices must respect due process&lt;/div&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;div id="_mcePaste"&gt;Principle VI: Transparency and accountability must be built into laws and content restriction policies and practices&lt;/div&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;div id="_mcePaste"&gt;Conclusion&lt;/div&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/files/indian-intermediary-liability-regime"&gt;Download the Full report here&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/indian-intermediary-liability-regime'&gt;https://cis-india.org/internet-governance/blog/indian-intermediary-liability-regime&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>divij</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Intermediary Liability</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-20T15:14:21Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/epw-amber-sinha-may-18-2018-for-indias-data-protection-regime-to-be-efficient-policymakers-should-treat-privacy-as-a-social-good">
    <title>India's Data Protection Framework Will Need to Treat Privacy as a Social and Not Just an Individual Good</title>
    <link>https://cis-india.org/internet-governance/blog/epw-amber-sinha-may-18-2018-for-indias-data-protection-regime-to-be-efficient-policymakers-should-treat-privacy-as-a-social-good</link>
    <description>
        &lt;b&gt;The idea that technological innovations may compete with privacy of individuals assumes that there is social and/or economic good in allowing unrestricted access to data. However, it must be remembered that data is potentially a toxic asset, if it is not collected, processed, secured and shared in the appropriate way.&lt;/b&gt;
        &lt;div class="field-label-hidden      field-type-text-with-summary field-name-body field" style="text-align: justify; "&gt;
&lt;div class="field-items"&gt;
&lt;div class="even field-item"&gt;
&lt;p&gt;Published in Economic &amp;amp; Political Weekly, Volume 53, Issue No. 18, 05 May, 2018. Article can be &lt;a class="external-link" href="http://www.epw.in/engage/article/for-indias-data-protection-regime-to-be-efficient-policymakers-should-treat-privacy-as-a-social-good"&gt;accessed online here&lt;/a&gt;.&lt;/p&gt;
&lt;hr /&gt;
&lt;p&gt;In             July 2017, the Ministry of Electronics and Information             Technology (MeITy) in India set up a committee headed by a             former judge, B N Srikrishna, to address the growing clamour             for privacy protections at a time when both private             collection of data and public projects like Aadhaar are             reported to pose major privacy risks (Maheshwari 2017). The             Srikrishna Committee is in the process of providing its             input, which will go on to inform India’s data-protection             law.&lt;/p&gt;
&lt;p&gt;While             the committee released a white paper with provisional views,             seeking feedback a few months ago, it may be discussing a             data protection framework without due consideration to how             data practices have evolved.&lt;/p&gt;
&lt;p&gt;In early 2018, a series of stories based on investigative journalism by &lt;em&gt;Guardian&lt;/em&gt; and &lt;em&gt;Observer&lt;/em&gt; revealed that the data of 87 million Facebook users was used for the Trump campaign by a political consulting firm, Cambridge Analytica, without their permission. Aleksandr Kogan, a psychology researcher at the University of Cambridge, created an application called “thisisyourdigitallife” and collected data from 270,000 participants through a personality test using Facebook’s application programming interface (API), which allows developers to integrate with various parts of the Facebook platform (Fruchter et al 2018). This data was collected purportedly for academic research purposes only. Kogan’s application also collected profile data from each of the participants’ friends, roughly 87 million people.&lt;/p&gt;
&lt;p&gt;The             kinds of practices concerning the sharing and processing of             data exhibited in this case are not unique. These are, in             fact, common to the data economy in India as well. It can be             argued that the Facebook–Cambridge Analytica incident is             representative of data practices in the data-driven digital             economy. These new practices pose important questions for             data protection laws globally, and how these may need to             evolve to address data protection, particularly for India,             which is in the process of drafting its own data protection             law.&lt;/p&gt;
&lt;h2&gt;&lt;strong&gt;Privacy as Control&lt;/strong&gt;&lt;/h2&gt;
&lt;p&gt;Most             modern data protection laws focus on individual control. In             this context, the definition by the late Alan Westin             (2015) characterises privacy as:&lt;/p&gt;
&lt;blockquote style="padding-left: 20px; "&gt;
&lt;p&gt;The claim of individuals, groups, or institutions to determine for themselves when, how, and to what extent information about them is communicated to others.&lt;/p&gt;
&lt;/blockquote&gt;
&lt;p&gt;The             idea of “privacy as control” is what finds articulation in             data protection policies across jurisdictions, beginning             with the Fair Information Practice Principles (FIPP) from             the United States (US) (Dixon 2006). These FIPPs are the             building blocks of modern information privacy law (Schwartz             1999) and not only play a significant role in the             development of privacy laws in the US, but also inform data             protection laws in most privacy regimes internationally             (Rotenberg 2001), including the nine “National Privacy             Principles” articulated by the Justice A P Shah Committee in             India. Much of this approach is also reflected in the white             paper released by the committee, led by Justice Srikrishna,             towards the creation of data protection laws in India             (Srikrishna 2017)&lt;/p&gt;
&lt;p&gt;This             approach essentially involves the following steps (Cate             2006):&lt;/p&gt;
&lt;p&gt;(i)             Data controllers are required to tell individuals what data             they wish to collect and use and give them a choice to share             the data. &lt;br /&gt; (ii) Upon sharing, the individuals have rights such as being             granted access, and data controllers have obligations such             as securing the data with appropriate technologies and             procedures, and only using it for the purposes identified.&lt;/p&gt;
&lt;p&gt;The             objective in this approach is to make the individual             empowered and allow them to weigh their own interests in             exercising their consent. The allure of this paradigm is             that, in one elegant stroke, it seeks to “ensure that             consent is informed and free and thereby also (seeks) to             implement an acceptable tradeoff between privacy and             competing concerns.” (Sloan and Warner 2014). This approach             is also easy to enforce for both regulators and businesses.             Data collectors and processors only need to ensure that they             comply with their privacy policies, and can thus reduce             their liability while, theoretically, consumers have the             information required to exercise choice. In recent years,             however, the emergence of big data, the “Internet of             Things,” and algorithmic decision-making has significantly             compromised the notice and consent model (Solove 2013).&lt;/p&gt;
&lt;h2&gt;&lt;strong&gt;Limitations of Consent &lt;/strong&gt;&lt;/h2&gt;
&lt;p&gt;Some             cognitive problems, such as long and difficult to understand             privacy notices, have always existed with regard to the             issue of informed consent, but lately these problems have             become aggravated. Privacy notices often come in the form of             long legal documents, much to the detriment of the readers’             ability to understand them. These policies are “long,             complicated, full of jargon and change frequently” (Cranor             2012).&lt;/p&gt;
&lt;p&gt;Kent             Walker (2001) lists five problems that privacy notices             typically suffer from:&lt;/p&gt;
&lt;p&gt;(i)             Overkill: Long and repetitive text in small print.&lt;br /&gt; (ii) Irrelevance: Describing situations of little concern to             most consumers.&lt;br /&gt; (iii) Opacity: Broad terms that reflect limited truth, and             are unhelpful to track and control the information collected             and stored.&lt;br /&gt; (iv) Non-comparability: Simplification required to achieve             comparability will lead to compromising of accuracy.&lt;br /&gt; (v) Inflexibility: Failure to keep pace with new business             models.&lt;/p&gt;
&lt;p&gt;Today,             data is collected continuously with every use of online             services, making it humanly impossible to exercise             meaningful consent. &lt;br /&gt; The quantity of data being generated is expanding at an             exponential rate. With connected devices, smartphones,             appliances transmitting data about our usage, and even the             smart cities themselves, data now streams constantly from             almost every sector and function of daily life, “creating             countless new digital puddles, lakes, tributaries and oceans             of information” (Bollier 2010).&lt;/p&gt;
&lt;p&gt;The             infinitely complex nature of the data ecosystem renders             consent of little value in cases where individuals may be             able to read and comprehend privacy notices. As the uses of             data are so diverse, and often not limited by a purpose             identified at the beginning, individuals cannot             conceptualise how their data will be aggregated and possibly             used or reused.&lt;/p&gt;
&lt;p&gt;Seemingly             innocuous bits of data revealed at different stages could be             combined to reveal sensitive information about the             individual. While the regulatory framework is designed such             that individuals are expected to engage in cost–benefit             analysis of trading their data to avail services, this             ecosystem makes such individual analysis impossible.&lt;/p&gt;
&lt;h2&gt;&lt;strong&gt;Conflicts Between Big Data               and Individual Control&lt;/strong&gt;&lt;/h2&gt;
&lt;p&gt;The             thrust of big data technologies is that the value of data             resides not in its primary purposes, but in its numerous             secondary purposes, where data is reused many times over             (Schoenberger and Cukier 2013).&lt;/p&gt;
&lt;p&gt;On the other hand, the idea of privacy as control draws from the “data minimisation” principle, which requires organisations to limit the collection of personal data to the minimum extent necessary to obtain their legitimate purpose and to delete data no longer required. Control is exercised and privacy is enhanced by ensuring data minimisation. These two concepts are in direct conflict. Modern data-driven businesses want to retain as much data as possible for secondary uses. Since these secondary uses are, by their nature, unanticipated, their practices run counter to the very principle of purpose limitation (Tene and Polonetsky 2012).&lt;/p&gt;
&lt;p&gt;It             is evident from such data-sharing practices, as demonstrated             by the Cambridge Analytica–Facebook story, that platform             architectures are designed with a clear view to collect as             much data as possible. This is amply demonstrated by the             provision of a “friends permission” feature by Facebook on             its platform to allow individuals to share information not             just about themselves, but also about their friends. For the             principle of informed consent to be meaningfully             implemented, it is necessary for users to have access to             information about intended data practices, purposes and             usage, so they consciously share data about themselves.&lt;/p&gt;
&lt;p&gt;In             reality, however, privacy policies are more likely to serve             as liability disclaimers for companies than any kind of             guarantee of privacy for consumers. A case in point is Mark             Zuckerberg’s facile claim that there was no “data-breach" in             the Cambridge Analytica–Facebook incident. Instead of asking             each of the 87 million users whether they wanted their data             to be collected and shared further, Facebook designed a             platform that required consent in any form only from 270,000             users. Not only were users denied the opportunity to give             consent, their consent was assumed through a feature which             was on by default. This is representative of how privacy             trade-offs are conceived by current data-driven business             models. Participation in a digital ecosystem is by itself             deemed as users’ consent to relinquish control over how             their data is collected, who may have access to it, and what             purposes it may be used for.&lt;/p&gt;
&lt;p&gt;Yet, Zuckerberg would have us believe that the primary privacy issue of concern is not about how his platform enabled the collection of users’ data without their explicit consent, but in the subsequent unauthorised sharing of the data by Kogan. Zuckerberg’s insistence that collection of data of people without their consent is not a data breach is reminiscent of the UIDAI’s recent claims in India that publication of Aadhaar numbers and related information by several government websites is not a data breach, so long as its central biometric database is secure (Sharma 2018). In such cases also, the intended architecture ensured the seeding of other databases with Aadhaar numbers, thus creating multiple potential points of failure through disclosure. Similarly, the design flaws in direct benefit transfers enabled Airtel to create payments bank accounts without the customers’ knowledge (&lt;em&gt;Hindu Business Line 2017&lt;/em&gt;). Such claims clearly suggest the very limited responsibility data controllers (both public and private) are willing to take for personal data that they collect, while wilfully facilitating and encouraging data practices which may lead to greater risk to data.&lt;/p&gt;
&lt;p&gt;On             this note, it is also relevant to point out that the             Srikrishna committee white paper begins with identifying             informational privacy and data innovation as its two key             objectives. It states that “a firm legal framework for data             protection is the foundation on which data-driven innovation             and entrepreneurship can flourish in India.”&lt;span&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p&gt;Conversations             around privacy and data have become inevitably linked to the             idea of technological innovation as a competing interest.             Before engaging in such conversations, it is important to             acknowledge that the value of innovation as a competing             interest itself is questionable. It is not a competing             right, nor a legitimate public interest endeavour, nor a             proven social good.&lt;/p&gt;
&lt;p&gt;The idea that in policymaking, technological innovations may compete with privacy of individuals assumes that there is social and/or economic good in allowing unrestricted access to data. The social argument is premised on the promises of mathematical models and computational capacity being capable of identifying key insights from data. In turn, these insights may be useful in public and private decision-making. However, it must be remembered that data is potentially a toxic asset, if it is not collected, processed, secured and shared in the appropriate way. Sufficient research suggests that indiscriminate data collection is greatly increasing the ratio of noise to signal, and can lead to erroneous insights. Further, the greater the amount of data you collect, the greater is the attack surface that leads to cybersecurity risks. Further, incidents such as Facebook–Cambridge Analytica demonstrate the toxicity of data in various ways and underscore the need for data regulation at every stage of the data lifecycle (Schneier 2016). These are important tempering factors that need to be kept in mind while evaluating data innovation as a key mover of policy or regulation.&lt;/p&gt;
&lt;h2&gt;&lt;strong&gt;Privacy as Social Good&lt;/strong&gt;&lt;/h2&gt;
&lt;p&gt;As             long as privacy is framed as arising primarily from             individual control, data controllers will continue to engage             in practices that compromise the ability to exercise choice.             There is a need to view privacy as a social good, and             policymaking should ensure its preservation and enhancement.             Contractual protections and legal sanctions can themselves             do little if platform architectures are designed to do the             exact opposite.&lt;/p&gt;
&lt;p&gt;More             importantly, policymaking needs to recognise privacy not             merely as an individual right, available for individuals to             forego when engaging with data-driven business models, but             also as a social good. The recognition of something as a             social good deems it desirable by definition, and a             legitimate goal of law and policy, rather than rely             completely on market forces for its achievement.&lt;/p&gt;
&lt;p&gt;The             Puttaswamy judgment (K Puttaswamy v Union of India             2017) lends sufficient weight to privacy’s social value by             identifying it as fundamental to any individual development             through its dependence on solitude, anonymity, and temporary             releases from social duties.&lt;/p&gt;
&lt;p&gt;Sociological             scholarship demonstrates that different types of social             relationships, be it Gesellschaft (interest groups and             acquaintances) or Gemeinschaft (friendship, love, and             marriage), and the nature of these relationships depend on             the ability to conceal certain things (Simmel 1906).             Demonstrating this in the context of friendships, it has             been stated that such relationships “present a very peculiar             synthesis in regard to the question of discretion, of             reciprocal revelation and concealment.” Friendships, much             like most other social relationships, are very much             dependent on our ability to selectively present ourselves to             others. Contrast this with Zuckerberg’s stated aim of making             the world more “open” where information about people flows             freely and effectively without any individual control.             Contrast this also with government projects such as the             Aadhaar which intends to act as one universal identity which             can provide a 360-degree view of citizens.&lt;/p&gt;
&lt;p&gt;Other             scholars such as Julie Cohen (2012) and Anita Allen (2011)             have demonstrated that data that a person produces or has             control over concerns both herself and others. Individuals             can be exposed not only because of their own actions and             choices, but also made vulnerable merely because others have             been careless with their data. This point is amply             demonstrated in the Facebook–Cambridge Analytica incident.             What this means is that protection of privacy requires not             just individual action, but in a sense, requires group             co-ordination. It is my argument that this group interest of             privacy as a social good must be the basis of policymaking             and regulation of data in the future, in addition to the             idea of privacy as an individual right. In the absence of             attention to the social good aspect of privacy, individual             consumers are left to their own devices to negotiate  their             privacy trade-offs with large companies and governments and             are significantly compromised.&lt;/p&gt;
&lt;p&gt;What             this translates into is a regulatory framework and data             protection frameworks should not be value-neutral in their             conception of privacy as a facet of individual control. The             complete reliance of data regulation on the data subject to             make an informed choice is, in my opinion, an idea that has             run its course. If privacy is viewed as a social good, then             the data protection framework, including the laws and the             architecture must be designed with a view to protect it,             rather than leave it entirely to the market forces.&lt;/p&gt;
&lt;h2&gt;&lt;strong&gt;The Way Forward&lt;/strong&gt;&lt;/h2&gt;
&lt;p&gt;Data             protection laws need to be re-evaluated, and policymakers             must recognise Lawrence Lessig’s dictum that “code is law.”             Like laws, architecture and norms can play a fundamental             role in regulation. Regulatory intervention for technology             need not mean regulation of technology only, but also how             technology itself may be leveraged for regulation (Lessig             2006; Reidenberg 1998). It is key that the latter is not             left only in the hands of private players. &lt;br /&gt; Zuckerberg, in his testimony (&lt;em&gt;Washington Post&lt;/em&gt; 2018) before             the United States Senate's Commerce and Judiciary             committees, asserted that "AI tools" are central to any             strategy for addressing hate speech, fake news, and             manipulations that use data ecosystems for targeting.&lt;/p&gt;
&lt;p&gt;What             is most concerning in his testimony is the complete lack of             mention of standards, public scrutiny and peer-review             processes, which “AI tools” and regulatory technologies need             to be subject to. Further, it cannot be expected that             data-driven businesses will view privacy as a social good or             be publicly accountable.&lt;/p&gt;
&lt;p&gt;As             policymakers in India gear up for writing the country’s data             protection law, they must acknowledge that their             responsibility extends to creating norms and principles that             will inform future data-driven platforms and regulatory             technologies.&lt;/p&gt;
&lt;p&gt;Since             issues of privacy and data protection will have to be             increasingly addressed at the level of how architectures             enable data collection, and more importantly how data is             used after collection, policymakers must recognise that             being neutral about these practices is no longer enough.             They must take normative positions on data collection,             processing and sharing practices. These positions cannot be             implemented through laws only, but need to be translated             into technological solutions and norms.  Unless a             multipronged approach comprising laws, architecture and             norms is adopted, India’s new data protection regime may end             up with limited efficacy.&lt;/p&gt;
&lt;/div&gt;
&lt;/div&gt;
&lt;/div&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/epw-amber-sinha-may-18-2018-for-indias-data-protection-regime-to-be-efficient-policymakers-should-treat-privacy-as-a-social-good'&gt;https://cis-india.org/internet-governance/blog/epw-amber-sinha-may-18-2018-for-indias-data-protection-regime-to-be-efficient-policymakers-should-treat-privacy-as-a-social-good&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>amber</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-18T06:22:57Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/hack-read-waqas-may-15-2018-indian-cricket-board-exposes-personal-data-of-thousands-of-players">
    <title>Indian Cricket Board Exposes Personal Data of Thousands of Players</title>
    <link>https://cis-india.org/internet-governance/news/hack-read-waqas-may-15-2018-indian-cricket-board-exposes-personal-data-of-thousands-of-players</link>
    <description>
        &lt;b&gt;The IT security researchers at Kromtech Security Center discovered a trove of personal and sensitive data belonging to around 15,000 to 20,000 Indian applicants participating in cricket seasons 2015-2018.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The blog post was published on &lt;a class="external-link" href="https://www.hackread.com/indian-cricket-board-exposes-data-of-cricketers/"&gt;Hack Read&lt;/a&gt; on May 15, 2018.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;The authority responsible for protecting this data was The Board of Control for Cricket in India (BCCI) but it was left exposed to the public in two misconfigured AWS (Amazon Web Service) S3 cloud storage buckets.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="https://mackeepersecurity.com/post/bcci-exposed-players-personal-sensitive-data" rel="noopener" target="_blank"&gt;According to the analysis&lt;/a&gt; from Kromtech researchers, the data was divided into different categories of players including those under 19 years old. The data was accessible to anyone with an Internet connection and basic knowledge of using AWS cloud storage.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The data was discovered earlier this month and included names, date of birth, place of birth, permanent addresses, email IDs, proficiency details, medical records, birth certificate number, passport number, SSC certificate number, PAN card number, mobile number, landline and phone number of the person who can be contacted in case of emergency.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;img alt="Indian Cricket Board Exposes Personal Data of Thousands of Players" src="https://www.hackread.com/wp-content/uploads/2018/05/indian-cricket-board-exposes-personal-data-of-thousands-of-players-1.png?x62286" /&gt;&lt;/p&gt;
&lt;p&gt;Screenshot of one of the files that were exposed (Image credit: Kromtech)&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;At the time of publishing this article, the BCCI was informed by Kromtech researchers and both misconfigured buckets were secured. However, this is not the first time when such sensitive information was leaked online. In 2017, Bangalore-based Centre for Internet and Society (CIS) &lt;a href="https://www.hackread.com/indian-biometric-system-data-leaked/" rel="noopener" target="_blank"&gt;found that&lt;/a&gt; names, addresses, date of birth, PAN card details, Aadhaar card numbers and other relevant details of millions of Indian citizen could be found with just a simple Google search.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;On the other hand, lately, AWS buckets have been &lt;a href="https://www.hackread.com/localblox-exposes-millions-of-facebook-linkedin-data/" rel="noopener" target="_blank"&gt;making headlines for the wrong reasons&lt;/a&gt;. Until now, there have been tons of cases in which misconfigured AWS buckets have been found carrying highly sensitive and confidential data &lt;a href="https://www.hackread.com/unprotected-s3-cloud-bucket-exposed-100gb-of-classified-nsa-data/" rel="noopener" target="_blank"&gt;such as classified NSA documents&lt;/a&gt; or details about &lt;a href="https://www.hackread.com/misconfigured-amazon-s3-buckets-exposed-us-militarys-social-media-spying-campaign/" rel="noopener" target="_blank"&gt;US Military’s social media spying campaign&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In two such cases, malicious hackers were able to compromise AWS buckets belonging to &lt;a href="https://www.hackread.com/hackers-compromise-tesla-cloud-server-to-mine-cryptocurrency/" rel="noopener" target="_blank"&gt;Tesla Motors&lt;/a&gt; and &lt;a href="https://www.hackread.com/la-times-website-hacked-mine-monero-cryptocurrency/" rel="noopener" target="_blank"&gt;LA Times&lt;/a&gt; to secretly mine cryptocurrency. Therefore, if you are an AWS user make sure your cloud server is properly secured.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/hack-read-waqas-may-15-2018-indian-cricket-board-exposes-personal-data-of-thousands-of-players'&gt;https://cis-india.org/internet-governance/news/hack-read-waqas-may-15-2018-indian-cricket-board-exposes-personal-data-of-thousands-of-players&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Aadhaar</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-18T05:01:50Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/the-wire-karan-saini-may-11-2018-aadhaar-remains-an-unending-security-nightmare-for-a-billion-indians">
    <title>Aadhaar Remains an Unending Security Nightmare for a Billion Indians</title>
    <link>https://cis-india.org/internet-governance/news/the-wire-karan-saini-may-11-2018-aadhaar-remains-an-unending-security-nightmare-for-a-billion-indians</link>
    <description>
        &lt;b&gt;Yesterday was the 38th and last day of hearings in the Supreme Court case challenging the constitutional validity of India’s biometric authentication programme. After weeks of arguments from both sides, the Supreme Court has now reserved the matter for judgement.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article by Karan Saini was published in the &lt;a class="external-link" href="https://thewire.in/government/aadhaar-remains-an-unending-security-nightmare-for-a-billion-indians"&gt;Wire&lt;/a&gt; on May 11, 2018.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;Since its inception, the Aadhaar project has lurched from controversy to scandal. In the last two years, the debate has heavily centred around issues of data security, privacy and government overreach. This debate, unfortunately, like with most things Aadhaar, has been obfuscated in no small part due to the manner in which the Unique Identification Authority of India (UIDAI) reacts to critical public discussion.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;As India waits for the apex court’s judgement, this is as good a time as any to take stock of the security and privacy flaws underpinning the Aadhaar ecosystem.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Poor security standards&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Let’s start with the lackadaisical attitude towards information security. As has become evident in the &lt;a href="https://cis-india.org/internet-governance/information-security-practices-of-aadhaar-or-lack-thereof/view" target="_blank"&gt;past&lt;/a&gt;, harvesting and collecting Aadhaar numbers – or acquiring scans and prints of valid Aadhaar cards – has become a trivial matter.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;There are several government websites which implement Aadhaar authentication while at the same time lack in basic security practices such as the use of SSL to encrypt user traffic and/or the use of captchas to protect against brute-force or scraping attacks. This includes the biometric attendance website of the &lt;a href="http://dgftbct.attendance.gov.in/register/myemp" rel="noopener" target="_blank"&gt;Director General of Foreign Trade&lt;/a&gt;, the website for the &lt;a href="http://nfsm.gov.in/dbt/aadhaarverification.aspx" rel="noopener" target="_blank"&gt;National Food Security Mission&lt;/a&gt; and the &lt;a href="http://medleaprhry.gov.in/PvtAddRecord.aspx" rel="noopener" target="_blank"&gt;Medleapr website&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;With numerous government websites being susceptible, problematic issues such as the use of open directories to store sensitive data gives us a look into how even the bare minimum – when it comes to adhering to security best practices – isn’t enforced across the gamut of websites which interface with Aadhaar.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;It should not be acceptable practice to have government websites with open web directories containing PDF scans of dozens of Aadhaar cards available for just about anyone to view and/or download. Yet, over the past year and even before, many government websites have been found to either inadvertently or knowingly publish this information without much regard for the potential consequences it could have.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The UIDAI has repeatedly shown an attitude of hostility and dismissiveness when it comes to fixing security and privacy issues which are present in the Aadhaar ecosystem. It has also shown no signs of how it plans to tackle this problem.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In my personal experience as a security researcher, I have found and reported a cache of more than 40,000 scanned Aadhaar cards being available through an unsecured database managed by a private company, which relied on those scans for the purposes of verifying and maintaining records of their customers.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;What’s worse is that the media reports regarding Aadhaar information being exposed may only be scratching the surface of the issue as more data may actually be susceptible to access and theft, and simply yet to be found and publicly reported. For example, data could be leaking through publicly available data stores of third-party companies interfacing with Aadhaar, or through inadequately secured API and sensitive portals without proper access controls.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Not all security incidents become a matter of public knowledge, so what we know at any given point about the illegal exposure of Aadhaar information may just be a glimpse of what is actually out there.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;It should be acknowledged that the possession of these 12-digit numbers and their corresponding demographic information can open up room for potential fraud –  or at the very least make it easier for criminals to carry out identity theft and SIM and banking fraud.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A &lt;a href="https://thewire.in/economy/aadhaar-fraud-uidai" target="_blank"&gt;detailed analysis&lt;/a&gt; of all publicly-reported Aadhaar-related or Aadhaar-enabled fraud over the last few years shows that the problem is not only real but deserves far more attention than what it has received so far.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Threat level infinity&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Taking a step back, it’s clear that the Aadhaar project snowballed into an ecosystem that it now struggles to control.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;For instance, demographic information – as is stated in the draft for the &lt;a href="https://www.uidai.gov.in/images/the_aadhaar_act_2016.pdf" rel="noopener" target="_blank"&gt;Aadhaar Act&lt;/a&gt; (NIDAI Bill 2010) – was originally considered confidential information, meaning no entity could request your demographic information such as name, address, phone number etc. for purposes of eKYC.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;However, as the ecosystem has progressed, the implementation and usage of eKYC have also changed and grown significantly with companies like PayTM utilising eKYC for the purposes of requesting and verifying customer information. It should be considered that data which has been collected by any of these companies through Aadhaar can be accessed by them in the future for an indefinite period of time depending on their own policies regarding storage and retention of the data.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;If there ever is a breach of the CIDR or a mirrored silo containing a significant amount of Aadhaar-related data, it would directly affect more than one billion people. To put this in perspective, it would easily be the single largest breach of data in terms of the sheer number of people affected &lt;i&gt;and&lt;/i&gt; it would have far-reaching consequences for everyone affected which might be very hard to offset.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;On a comparatively smaller scale – although just as serious, if not more in terms of potential implications – would be a breach of any given state’s resident data hub (SRDH) repository. In some cases, SRDHs &lt;a href="https://www.thenewsminute.com/article/13-lakh-aadhaar-numbers-leaked-andhra-govt-website-linked-personal-details-80178" rel="noopener" target="_blank"&gt;have been known to integrate data&lt;/a&gt; acquired from other sources containing information regarding parameters such as caste, banking details, religion, employment status, salaries, and &lt;a href="https://webcache.googleusercontent.com/search?q=cache:-HMXusc-Nm4J:https://mpsrdh.gov.in/aboutUsCitizen.html+&amp;amp;cd=2&amp;amp;hl=en&amp;amp;ct=clnk&amp;amp;gl=in&amp;amp;client=firefox-b-ab" rel="noopener" target="_blank"&gt;then linking the same&lt;/a&gt; to residents’ corresponding Aadhaar data.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Damage control would be costly and painstaking due to the number of people enrolled. What adds to the disastrous consequences is that one cannot just deactivate their Aadhaar or opt-out of the programme the way they would with say a compromised Facebook or Twitter account. You can always deactivate Facebook. You cannot deactivate your Aadhaar. It should be noted that even with biometrics set to ‘disabled’, Aadhaar verification transactions can be verified through OTP.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Additionally, the Aadhaar ecosystem is such that information about individuals can be accessed not just from UIDAI servers but also from other third-party databases where Aadhaar numbers are linked with their own respective datasets. Due to this aspect – multiple points of failure are introduced for possible compromise of data, especially because third-party databases are almost certainly not as secure as the CIDR.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Recently, after taking a closer look at the ecosystem of websites which incorporate the use of Aadhaar based authentication, I &lt;a href="https://www.karansaini.com/extracting-aadhaar-linked-phone-numbers/" rel="noopener" target="_blank"&gt;discovered that it was possible&lt;/a&gt; to extract the phone number linked to any given Aadhaar through the use of websites which poorly implemented Aadhaar text-based (OTP) authentication.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;This process worked by first retrieving the last four digits of the phone number linked to an Aadhaar using any website which reveals this information (this includes DigiLocker, NFSM.gov.in and seems to be standard practice which seems to be enforced by UIDAI) and then performing an enumeration attack on the first six digits using websites which allow the user to provide both their Aadhaar number and the verified phone number linked to it.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;This again highlights that while secure practices might be followed by the UIDAI, the errors in implementation and other flaws are introduced nevertheless by third parties who interface with Aadhaar, posing a risk to the privacy and security of its data.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;The bank mapper rabbit hole&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;As of February 24, 2017, it &lt;a href="https://thewire.in/government/india-inc-needs-to-fix-numerous-basic-information-security-flaws-quickly" target="_blank"&gt;was possible&lt;/a&gt; to retrieve bank linking status information directly from UIDAI’s website without any prior verification.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;However, after this information was reported, the ‘&lt;a href="https://uidai.gov.in/" rel="noopener" target="_blank"&gt;uidai.gov.in&lt;/a&gt;’ website was updated to first require requesters to prove their identity before retrieving Aadhaar bank-linking data from the endpoint on their website.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A year later – when business technology news site &lt;i&gt;ZDNet &lt;/i&gt;published their report regarding a flawed API on the website of a state-owned utility company (later revealed to be Indane) – part of the data revealed included bank linking status information which was identical to what was previously revealed on UIDAI’s website without proper authentication.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;This suggests that both the Indane API and UIDAI website utilised the National Payments Corporation of India (NPCI) to retrieve bank-linking data – but as of now, this remains conjecture since Indane never put out a statement or gave a public comment regarding the flawed API on their website.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;More importantly, what this also suggests is that the NPCI never placed any controls or security mechanisms (such as request throttling or access controls) on the lookup requests it processed for the UIDAI (and seemingly for Indane as well).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;This means that while the UIDAI may have fixed their website to not reveal bank linking data without proper verification – the issue was not rectified at its core by the NPCI – allowing the same to happen a year later in Indane’s case. This practice also classifies as a case of security through obscurity, &lt;a href="http://users.softlab.ntua.gr/~taver/security/secur3.html" rel="noopener" target="_blank"&gt;which&lt;/a&gt; “is the belief that a system of any sort can be secure so long as nobody outside of its implementation group is allowed to find out anything about its internal mechanisms”.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Who is on the hook?&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;There is a lack of needed accountability when it comes to data breaches. Have any of the organisations against whom allegations of data breach been made been investigated and acted on? Have fines been imposed on those responsible for allowing access/theft of user data? Have there been reports published by any of the affected organisations in which they investigate any alleged breaches to either provide insight regarding the breach and its impact, the scale of data accessed, logs of access and other crucial evidence or dismiss the allegations by proving that there was no intrusion which took place?&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Most of the time, organisations do not even accept that a breach has taken place, let alone take responsibility for the same and strive to better protect user data in the future.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Switching to ‘PR spin mode’ should never be the answer when dealing with the data of billion-plus Indian citizens and residents. This can be observed in almost all cases where a breach or security lapse was alleged.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The UIDAI has also acquired the dubious reputation of sending legal notices and slapping cases on journalists and security researchers who seek to highlight the security and privacy problems ailing the Aadhaar infrastructure.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In March 2017, a case against Sameer Kochhar – chairman of the Skoch Group – was filed on the basis of a complaint from Yashwant Kumar of the UIDAI allegedly for “spreading rumours on the internet about vulnerability of the Aadhaar system”. Kochhar had written an article in February 2017 titled “Is a Deep State at Work to Steal Digital India?” in which a request replay attack on biometric Aadhaar authentication was demonstrated.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Two months later, The Centre for Internet and Society published a report regarding several government websites which were inadvertently leaking millions of Aadhaar card numbers. A few days after this report was published, the UIDAI &lt;a href="https://in.reuters.com/article/india-aadhaar-breach/critics-of-aadhaar-project-say-they-have-been-harassed-put-under-surveillance-idINKCN1FX1SS" rel="noopener" target="_blank"&gt;sent a legal notice to the organisation&lt;/a&gt;, stating that the people involved with the report had to be “brought to justice”.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In January 2018, an investigative story was published by Rachna Khaira of &lt;em&gt;The Tribune&lt;/em&gt; newspaper – in which she reported that access to an Aadhaar portal was being sold by “agents” for as cheap as Rs 500. In response to this story – the UIDAI first sought to discredit the investigative work by calling it a ‘case of misreporting’ – after which they attempted to downplay the magnitude of the report by citing that biometrics were safe and had not been breached.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Following this, the Delhi crime branch registered an FIR against the reporter and others named in the article on the basis of a complaint by a UIDAI official, with charges ranging from forgery, cheating by impersonation and unauthorised access of a computer system.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In March 2018, &lt;em&gt;ZDNet&lt;/em&gt; published a report about Aadhaar-related data leaking from an unsecured API on a utility provider’s website. This was the result of days of testing to first confirm the existence of the issue and its scope. It was preceded by more than a month of attempted communication through several channels of communication – email, phone, even direct messages via Twitter – with both Indane and the UIDAI (and even the Indian Consulate in New York).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;But still, when the report was published after a lack of acknowledgement/response from affected parties, the UIDAI was quick to deny the report as well as any possibility of such a thing occurring. The Aadhaar agency then released a statement in which they said they were ‘contemplating legal action’ against the publication of their report.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Data security and privacy laws won’t do much to affect the dismissive and hostile attitude the UIDAI seems to have regarding the people that investigate and report on security and privacy issues relating to Aadhaar.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Hide and seek&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;In general, when it comes to reports of security breaches and security incidents, many authorities in India prefer playing the blame-game. This was seen latest in response to an internal letter (ironically marked as ‘SECRET’) that was circulated on social media – which mentioned that data was stolen from the Aadhaar Seeding portal of the EPFO by hackers exploiting a known vulnerability in the Apache Struts framework.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Following this – the EPFO &lt;a href="https://economictimes.indiatimes.com/wealth/personal-finance-news/epfo-slams-aadhaar-data-theft-reports-on-social-media/articleshow/63999631.cms?utm_source=WAPusers&amp;amp;utm_medium=whatsappshare&amp;amp;utm_campaign=socialsharebutton&amp;amp;from=mdr" rel="noopener" target="_blank"&gt;quickly switched to PR mode&lt;/a&gt; and publicly issued a statement through their official Twitter account (@socialepfo) denying the breach – saying that “There is no leak from EPFO database. We have already shut down the alleged Aadhaar seeding site run by Common Service Centres on 22.03.2018.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Every time reports of a potential breach or leak of data circulate, Indian government agencies are quick to come out and announce that no breach has taken place. However, this is always to be taken just on the basis of their saying so, as opposed to the reports which they’re meant to be arguing (in some cases) contain verifiable evidence which is the result of arduous investigative work.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Regardless, passing around the blame and in cases completely denying security incidents is not something authorities should be doing when it concerns the data of more than a billion people.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In response to a recent story by &lt;em&gt;Asia Times&lt;/em&gt; &lt;a href="https://www.thewire.in/government/cracked-aadhaar-enrolment-software-being-sold" rel="noopener" target="_blank"&gt;regarding Aadhaar enrolment software being cracked and sold&lt;/a&gt;, the UIDAI sought to discredit and discount the report through messages shared on their social media profiles – where they stated that the report was “baseless, false, misleading and irresponsible”.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The UIDAI should have an interest in protecting any and all data which stems from or relates to Aadhaar as it has to do with a project they are ultimately responsible for. It should not matter whether the leak occurred from a portal on EPFO’s website, an API without proper access controls on Indane’s website, a website of the Andhra Pradesh state government, through biometric request replay attacks, through sold access to admin portals and cracked software, or however else. It should ultimately be the UIDAI’s responsibility to not only be reactive about these issues when they’re brought to light but to do so in such a way which does not hinder reporters from continuing their work.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Additionally, if the UIDAI wishes to keep its systems as secure as they could be – they should proactively seek such reports about flaws or vulnerabilities in critical infrastructure pertaining to their project.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;The way forward&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;In April 2018, the head of the Indian Computer Emergency Response Team (CERT-IN), &lt;a href="https://factordaily.com/vulnerability-reported-cert/" rel="noopener" target="_blank"&gt;rather defensively noted&lt;/a&gt; that “not a single person had reported any incident” to the organisation.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;CERT-In, a part of the IT ministry, is the central agency responsible for dealing with security issues and incidents. To put it bluntly, it has not done a very great job of outreach when it comes to the people it ultimately relies on: security researchers and hackers.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In India, there is an abundance of skills and talent when it comes to IT security and this could be of immense help to organisations responsible for managing critical infrastructure – but only if they cared enough to utilise it to the fullest extent.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Ajay Bhushan Pandey, the CEO of UIDAI,  promised a secure and legal bug reporting environment for the Aadhaar ecosystem sometime in 2017. However, almost a year later, there are no tangible signs of any steps being taken to ensure the same. In fact, the UIDAI would already be straying from their usual course of action if they stopped harassing people reporting on issues of security and privacy with regard to Aadhaar.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;It has been suggested that the UIDAI employ a bug bounty programme – which involves rewarding hackers with monetary compensation or through means such as an addition to a ‘Security Hall of Fame’ as an incentive.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;I personally believe that there is no need for a bug bounty programme in its traditional sense – meaning that UIDAI should not have to provide material incentives to attract hackers to report valid issues to them. Simply acknowledging the work of those that discover and report valid issues should more than likely be incentive enough to get talent on-board.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The US Department of Defense (DoD) employs a similar approach &lt;a href="https://www.hackerone.com/sites/default/files/2018-03/Distributed%20Defense-How%20Governments%20Deploy%20Hacker-Powered%20Security.pdf" rel="noopener" target="_blank"&gt;where they invite hackers from the world&lt;/a&gt; over to test their systems for security vulnerabilities/bugs and then report them in a responsible manner. What the hackers get in return is the acknowledgement of their skill and devotion to ensuring the security of DoD’s platform. Something similar needs to be set up with regard to critical information infrastructures in India so that issues can be reported by anyone who wishes to do so – without hassle and/or fear of persecution hanging over the heads of hackers.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/the-wire-karan-saini-may-11-2018-aadhaar-remains-an-unending-security-nightmare-for-a-billion-indians'&gt;https://cis-india.org/internet-governance/news/the-wire-karan-saini-may-11-2018-aadhaar-remains-an-unending-security-nightmare-for-a-billion-indians&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Aadhaar</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-13T16:28:40Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/ozy-aayush-soni-may-11-2018-indias-national-id-project-brings-pain-to-those-it-aims-to-help">
    <title>India's National ID Project Brings Pain to Those it Aims to Help</title>
    <link>https://cis-india.org/internet-governance/news/ozy-aayush-soni-may-11-2018-indias-national-id-project-brings-pain-to-those-it-aims-to-help</link>
    <description>
        &lt;b&gt;Poor management, corruption and fraud are threatening to derail the world’s largest national identity project. &lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The blog post by Aayush Soni was &lt;a class="external-link" href="https://www.ozy.com/fast-forward/indias-national-id-project-brings-pain-to-those-it-aims-to-help/86381"&gt;published in Ozy.com&lt;/a&gt; on May 11, 2018.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;For Phoolmati, a resident of the Kusumpur Pahari slum in south &lt;a href="https://www.ozy.com/good-sht/how-delhi-went-hipster/69430" target="_blank"&gt;Delhi&lt;/a&gt;, standing every month in a queue at the neighborhood fair-price shop was a trusted routine. When her turn came up, she would place her thumb on a scanning machine that confirmed her identity. But on a biting-cold morning this past January, she had to return home empty-handed because, the shopkeeper told her, the “server was down.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The next day, it happened again. On her third try, Phoolmati thought she had gotten lucky when the machine scanned her thumb successfully. But she was in for a shock. “The shopkeeper told me that, according to the computer records, I’ve already taken my quota of wheat flour for the month,” she says. When she protested and showed her ration card, another form of identification, the shopkeeper wouldn’t accept it.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Left with no choice, Phoolmati had to buy wheat flour from the open market at 25 rupees per kilogram — more than 12 times the amount she usually paid at fair-price shops. She wasn’t alone. At a weekly meeting of slum residents in a temple courtyard in April, many women complained about the difficulty of buying subsidized food grains to the Satark Nagrik Sangathan (Alert Citizens Organization), a nonprofit that seeks accountability from government agencies. Nanno Devi, a 67-year-old homemaker whose fingers are wrinkled with age, said that she didn’t receive her quota of wheat flour for January because a fingerprint-scanning machine couldn’t detect her thumb impression.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Nor are the urban poor, like Phoolmati, the only ones with such complaints. Students with government scholarships, senior citizens with pensions, farmers entitled to subsidies, religious minorities and backward castes eligible for benefits, patients at public hospitals, young couples trying to get married and professionals updating their bank details are all on the front line of an unparalleled experiment that was meant to help them but is hurting them instead.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Theirs is the lived experience of &lt;a href="https://www.ozy.com/fast-forward/whos-ready-for-the-biometric-id-revolution/30972" target="_blank"&gt;Aadhaar&lt;/a&gt;, a unique 12-digit identity system that includes an individual’s biometrics and demographic data — and that must verify an individual’s identity for the government, increasingly, to even recognize their existence. First rolled out in 2010, it is modeled on America’s Social Security number system, with the aim that government subsidies and welfare programs reach the intended beneficiaries and aren’t siphoned off by middlemen.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;But over the past three years, India’s Narendra Modi government has cajoled, pressured and often effectively forced people into enrolling for this ID, even though it isn’t required by law. Today, a person’s bank account risks being frozen if it isn’t linked to her Aadhaar number. Her PAN (permanent account number) card, used to file income tax, could be declared invalid. Mobile phone companies can disconnect her number if it isn’t authenticated through biometrics. An Aadhaar number (or an enrollment number, in case someone has already applied for it) is mandatory to open a new bank account, get a new passport, invest in mutual funds or register a marriage. A joke making the rounds on Twitter is that very soon, Aadhaar will be mandatory for a person to swipe right on Tinder.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In the absence of any privacy law, much of the concern within sections of India’s educated middle class has focused on questions about personal freedom, data security and mass surveillance. But a parallel tide of complaints is rising from those the program was meant to help, rooted in complications it has instead imposed upon them. This growing frustration is threatening to derail the initiative in a manner privacy can’t, in a nation where millions live in cramped city apartments with strangers, and the distinction between personal and public is often blurred.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Cases of fraud, mismanagement and corruption hurting Aadhaar beneficiaries are tumbling out into the public domain almost every week. In late March, hackers used weaknesses in the Aadhaar database to steal data from a government organization that manages more than $120 billion in the pensions and savings of millions of Indians. In January, a 10-year-old girl from the Dalit community — historically at the bottom of India’s caste ladder — was denied a school scholarship because officials had misnamed her on her Aadhaar card. Last October, a farm loan waiver program in Maharashtra state ran into trouble after officials discovered that 100 farmers had the same Aadhaar identity number.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The Modi government maintains that it takes both the security of personal data and the concerns of Aadhaar beneficiaries seriously. But it is reluctant to answer any questions about identity theft, corruption, privacy or misappropriated benefits. Neither Ajay Bhushan Pandey, the current CEO of the Unique Identification Authority of India (UIDAI), which runs Aadhaar, nor Vikas Shukla, its spokesperson, responded to multiple requests for comment.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;At a public rally in early May, Modi — who had himself opposed the program before he came to power in 2014 — called critics of Aadhaar “opponents of technology” unwilling to evolve with the times. Increasingly, though, many are questioning whether it’s Aadhaar’s own identity that has changed the most from when the idea first came up. “From a project of inclusion, it has become a project of exclusion,” says Usha Ramanathan, a lawyer who focuses on issues of development and poverty. Just ask Phoolmati.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Aadhaar was the brainchild of Nandan Nilekani, a former CEO of tech giant Infosys, who in a 2009 book argued that multiple forms of identification made it “difficult” to establish a “definitive identity” for India’s citizens.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A single identity linked to passports, PAN cards and other national databases, Nilekani argued, would not only solve this problem but also help eliminate the exasperating processes that India’s bureaucracy is notorious for — mountains of paper, proof of identity in triplicate and a glacial pace of work. It would help citizens avail government benefits that are rightfully theirs. Such a system would reduce a citizen’s dependence on distribution mechanisms susceptible to leakages and make “the moral scruples of our bureaucrats redundant,” Nilekani wrote. “An IT-enabled, accessible national &lt;a href="https://www.ozy.com/fast-forward/should-you-carry-a-municipal-id-card/31240" target="_blank"&gt;ID system&lt;/a&gt; would be nothing less than revolutionary in how we distribute state benefits and welfare handouts.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;That same year, the Congress Party–led United Progressive Alliance government offered Nilekani a chance to translate his idea into reality, appointing him UIDAI chairman. Under Nilekani the UIDAI hired people from within the Indian bureaucracy as well as those outside it. The initial team of 50 included software engineers, designers and entrepreneurs from Silicon Valley as well as lawyers and policy wonks who worked at the head office in New Delhi. Each of the eight regional offices had a staff of 20.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In its early-stage avatar, the team had thought out solutions to problems such as the ones the residents of Kusumpur Pahari faced, says a policy consultant who worked with the UIDAI in 2010 and spoke on condition of anonymity. “You can use old methods and physically verify a person’s name and address [by going to their house] if biometrics aren’t working,” the consultant says. “It’s built into the architecture [of Aadhaar].” In his view, the current government under &lt;a href="https://www.ozy.com/provocateurs/the-man-busting-narendra-modis-tall-tales/83435" target="_blank"&gt;Modi&lt;/a&gt; — whose Bharatiya Janata Party defeated the Congress Party and came to power in 2014 — and the UIDAI setup have made a “mess” of the program. He also believes that the goal has shifted from inclusion to mass enrollment. Nilekani did not respond to a request for comment.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;For sure, Aadhaar has staunch supporters too, who argue that it has helped reduce the misuse of government subsidies. In July 2017, India’s junior minister for consumer affairs, food and public distribution, C.R. Chaudhary, told the country’s Parliament that Aadhaar had helped the government delete nearly 25 million fake ration cards that the poor use to access subsidized food ingredients.&lt;/p&gt;
&lt;div class="pagebreak" style="text-align: justify; "&gt;&lt;/div&gt;
&lt;div class="ozy-advert-wrapper" style="text-align: justify; "&gt;
&lt;div id="sas_86381_2"&gt;&lt;/div&gt;
&lt;/div&gt;
&lt;p style="text-align: justify; "&gt;“This unnecessary fearmongering around Aadhaar is uncalled for,” says Sanjay Anandaram of iSpirit, a software industry think tank. In his view, it’s “last-mile deployment challenges” like fingerprint authentication, one-time-password systems and server glitches that need to be fixed, not Aadhaar. He juxtaposes anecdotal examples of people struggling to gain benefits with the “larger purpose” he believes Aadhaar serves. “It is a revolutionary system to ensure governance improves — especially for centrally administered programs,” he says.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The UIDAI has made some efforts too, if not to improve security of personal data then at least to allow citizens to check whether their Aadhaar identity has been misused. They can go online and view any occasions when their Aadhaar identity was used to access benefits.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;But for millions of Indians dependent on subsidies, pensions, scholarships and other benefits, the concerns go well beyond privacy. Getting an Aadhaar identity can be a struggle. Earlier this year, the Punjab government conceded that it can’t process nearly 200,000 farm loan waiver claims either because intended beneficiaries don’t have Aadhaar cards or because the UIDAI is still processing their applications. At the same time, not signing on to Aadhaar is increasingly not an option. In February 2017, Chaudhary’s ministry made it mandatory for individuals to have an Aadhaar card to access subsidized food grains. Then, in October, an 11-year-old girl died of starvation in the central state of Jharkhand because the local ration dealer refused to give her family food grains for six months, as they had not linked their ration cards to Aadhaar. Facing criticism, the government asked states not to deny the poor the food grains they are entitled to, but the incident underscored how the Aadhaar initiative is cutting the needy off from subsidy access, rather than helping them, suggests Ramanathan, the lawyer. “People are dying because of Aadhaar,” she says.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;But the &lt;a href="https://www.ozy.com/rising-stars/can-modis-new-nemesis-take-down-the-prime-minister/85152" target="_blank"&gt;Modi government&lt;/a&gt; has shown no signs of rethinking either the ways in which Aadhaar appears to hurt the poorest in Indian society or its data security protocols. Instead, it has appeared keener to target whistle-blowers pointing out weaknesses in the initiative.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;It cost Rachna Khaira, a reporter, only 500 rupees ($7.50) to access the entire Aadhaar database — the names, addresses, fingerprint scans, iris scans, mobile phone numbers, email addresses, postal index numbers (PINs) and Aadhaar numbers of 830 million Indians. She “purchased” the service offered by anonymous sellers on WhatsApp and transferred the money via Paytm, a popular digital wallet company, to an “agent,” who created a “gateway” for Khaira. He then gave her a log-in ID and a password to that gateway, which allowed Khaira unrestricted access to the Aadhaar database. Her report, published in January in &lt;em&gt;The Tribune&lt;/em&gt;, one of India’s oldest English dailies, created a national stir. Instead of trying to plug the holes the report had revealed, the UIDAI filed criminal cases against Khaira and the newspaper, accusing them of breaching privacy.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Khaira’s wasn’t the first piece of evidence to expose the vulnerability of the Aadhaar database. In May 2017, a report by the Centre for Internet and Society, a nonprofit organization, claimed that 130 million to 135 million Aadhaar numbers were published on four websites: the National Social Assistance Programme, the National Rural Employment Guarantee Scheme and two projects run by Andhra Pradesh state. “This is the largest exercise in the world of the conversion of public information into an asset and then its privatization,” says Nikhil Pahwa, editor of MediaNama and a critic of Aadhaar.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;These breaches of security highlight corruption and mismanagement that belie claims the government continues to peddle. In April 2017, Ravi Shankar Prasad, India’s minister of information and technology, told Parliament that “Aadhaar is robust. Aadhaar is safe. Aadhaar is secure, and totally accountable.” The government hasn’t appeared too perturbed by privacy concerns. On July 22, 2015, Mukul Rohatgi, the then attorney general, argued before the country’s Supreme Court that “the right of privacy is not a guaranteed right under our constitution.” That set off a two-year-long hearing before a nine-judge bench of the court, which unanimously ruled in 2017 that the right to privacy was indeed a fundamental right.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The criticism from social groups Aadhaar was meant to benefit, though, has left the Modi administration on the defensive. Since the passage of the 2016 Aadhaar law, civil society activists have filed 12 petitions in the &lt;a href="https://www.ozy.com/provocateurs/why-this-rohingya-refugee-is-taking-on-indias-government/82487" target="_blank"&gt;Supreme Court&lt;/a&gt; challenging its legality. In January, the All India Kisan Sabha, one of India’s largest farmer organizations with millions of members, petitioned the top court against government moves to link subsidies to Aadhaar identities. Some leaders from Modi’s party, the BJP, have also started questioning their own government in Parliament about cases of beneficiaries denied their due because of the Aadhaar program. The Supreme Court, which is holding regular hearings on the case, has extended indefinitely the date by which citizens must link all identity documents to their Aadhaar number, until it rules on the validity of the legislation. At stake is the trust the Indian people can place in their government.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Back in Kusumpur Pahari, much of that trust has already eroded. In his 2014 election campaign, Modi had promised to stand guard as a &lt;em&gt;chaukidaar&lt;/em&gt; (watchman) over the country’s resources, to prevent corruption. But when someone illegally withdrew Phoolmati’s grains by using her Aadhaar identity, the watchman wasn’t able to stop the theft.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;For Phoolmati and other residents of Kusumpur Pahari, their ration cards guaranteed them food, and were a rare pillar of certainty in an unstable life. The Aadhaar-linked fingerprint authentication system is a source of frustration, and they don’t want it, they make clear at their weekly meeting. They now get their ration some months, and other months they don’t. Life on the fringes of society was already tough. Aadhaar, they say, has made it harder still.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/ozy-aayush-soni-may-11-2018-indias-national-id-project-brings-pain-to-those-it-aims-to-help'&gt;https://cis-india.org/internet-governance/news/ozy-aayush-soni-may-11-2018-indias-national-id-project-brings-pain-to-those-it-aims-to-help&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Aadhaar</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-12T00:53:39Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/artificial-intelligence-for-growth-leveraging-ai-and-robotics-for-indias-economic-transformation">
    <title>Artificial Intelligence for Growth: Leveraging AI and Robotics for India's Economic Transformation</title>
    <link>https://cis-india.org/internet-governance/news/artificial-intelligence-for-growth-leveraging-ai-and-robotics-for-indias-economic-transformation</link>
    <description>
        &lt;b&gt;Amber Sinha took part in the second international conference organized by ASSOCHAM at Hotel Shangri-La in New Delhi on April 27, 2018.&lt;/b&gt;
        &lt;h3&gt;Keynote Address&lt;/h3&gt;
&lt;p&gt;12.15 p.m. - 12.30 p.m.: Shri Gopalakrishnan S., Joint Secretary, Ministry of Electronics and IT, Government of India&lt;/p&gt;
&lt;h3&gt;Special Address&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;12.30 p.m. - 12.45 p.m.: Dr. Pushpak Bhattacharyya, Director and Professor, Computer Science and Engg, IIT Patna and Chairman, BIS Committee for Standardisation in Artificial Intelligence&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Panel Discussion&lt;/h2&gt;
&lt;h3&gt;Session Moderator&lt;/h3&gt;
&lt;p&gt;12.45 p.m. - 1.40 p.m.: Shri Sudipta Ghosh, India Leader, Data and Analytics, PwC&lt;/p&gt;
&lt;h3&gt;Panelists&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;Shri Amber Sinha, Senior Programme Manager, Centre for Internet and Society&lt;/li&gt;
&lt;li&gt;Shri Utpal Chakraborty, Lead Architect - AI, L&amp;amp;T Infotech&lt;/li&gt;
&lt;li&gt;Shri Atul Rai, CEO &amp;amp; Co-Founder, Staqu Technologies&lt;/li&gt;
&lt;li&gt;Shri Prabhat Manocha, IBM&lt;/li&gt;
&lt;/ul&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/artificial-intelligence-for-growth-leveraging-ai-and-robotics-for-indias-economic-transformation'&gt;https://cis-india.org/internet-governance/news/artificial-intelligence-for-growth-leveraging-ai-and-robotics-for-indias-economic-transformation&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-05T09:08:07Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/economic-times-may-2-2018-cyber-experts-say-playground-open-for-influencing-elections">
    <title>Cyber experts say 'playground open' for influencing elections</title>
    <link>https://cis-india.org/internet-governance/news/economic-times-may-2-2018-cyber-experts-say-playground-open-for-influencing-elections</link>
    <description>
        &lt;b&gt;Cyber experts said that under the provisions provided by 43 (A) of Indian IT Act, two types of data collection are completely legal: first, the data shared by the user in the public domain and secondly, the data published by the social platforms, like Facebook.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;This article was published in the &lt;a class="external-link" href="https://cio.economictimes.indiatimes.com/news/digital-security/cyber-experts-say-playground-open-for-influencing-elections/63994457"&gt;Economic Times&lt;/a&gt; on May 2, 2018. Sunil Abraham was quoted.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;With the Karnataka Assembly &lt;a href="https://cio.economictimes.indiatimes.com/tag/elections"&gt;elections&lt;/a&gt; round the corner, the cyber experts have said that it is quite possible to influence elections in India.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Talking to ANI, cyber expert Sunil Abraham did not rule out the possibility of influencing the voters as India does not have &lt;a href="https://cio.economictimes.indiatimes.com/tag/data+protection"&gt;data protection&lt;/a&gt; law in place.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;He said under the provisions provided by 43 (A) of Indian IT Act, two types of data collection are completely legal: first, the data shared by the user in the public domain and secondly, the data published by the social platforms, like &lt;a href="https://cio.economictimes.indiatimes.com/tag/facebook"&gt;Facebook&lt;/a&gt; and Twitter, which was shared by the user for his/her friends.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;"Both these types of data are not considered sensitive personal data. Under Indian law, if they are collecting your biometrics, passwords and health information only then they need your consent," Abraham told in an exclusive interview.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Replying a question about the chances of &lt;a href="https://cio.economictimes.indiatimes.com/tag/political+parties"&gt;political parties&lt;/a&gt; influencing elections, Abraham said, "One cannot answer this question with a clear yes or no. But, the more a political party has in its database about you; the more they can micro-target you for various types of advertising."&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;He, however, said with the literacy level of Indian internet users, the chances are high that they can be manipulated.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;"Once they do this, especially in a country where 30 percent of the public is illiterate and only 10 percent of public knows English and many-many users have just come online, there is a high chance that these users can be manipulated," the cyber expert said.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;When asked can it be termed influence, he said, "It will definitely be an influence. Most of the internet users in India have just come online, they don't have media literacy; they have not consumed older technologies like television and broadcast media like radio sufficiently enough so it is easy for these users to get fooled by the content that is propaganda and fake news etcetera."&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Abraham said it is unlikely that India will have a data protection law before 2019 general elections, which means the playground is open for people with a clever idea to manipulate voters.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;"India is working on data protection laws from last eight years. With the existing laws; all the political parties, social media companies, and search engine optimization companies etcetera can do what they want and they won't get into trouble. So, it is very unlikely that this data protection law is going to be approved by Parliament the 2019 elections. So for the 2019 elections, it is going to be very exciting times because anybody who has any clever idea when it comes to manipulating voters, they will definitely try it. Because, there is no law to stop them from trying those tricks," the cyber expert said.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Replying to another question about India's position in data &lt;a href="https://cio.economictimes.indiatimes.com/news/digital-security"&gt;security&lt;/a&gt;, he said, 'India is lagging as per the global trend across the world. The European Union's world-class data protection law called 'General Data Protection Regulation' is being followed by all the countries with the exception of the US. About 108 countries have the data protection laws which look similar to the EU's General Data Protection Regulation."&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;He, however, added, "We shouldn't be upset because making a law in a big country like India takes time. Shri Krishna Committee is going to present the draft of the Indian data protection law and hopefully, within one or two years India will have data protection law."&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Another expert Shubhamangala Sunil told ANI that "In India, our data is not secure today. Be it politicians or businesses, they want the database of people. Many data &lt;a href="https://cio.economictimes.indiatimes.com/tag/breaches"&gt;breaches&lt;/a&gt; have already happened and they are being used for different propagandas".&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;She said the union government and state governments should come forward and tell people about data security measures instead of people complaining about the data breach.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;She also said India is at least 10 years behind in comparison with the world in the &lt;a href="https://cio.economictimes.indiatimes.com/tag/cyber+security"&gt;cyber security&lt;/a&gt; domain.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The comments of the experts have come in the backdrop of recently data breach in the Facebook wherein its CEO Mark Zuckerberg faced US Congress for two days over the data theft. The Facebook-Cambridge Analytica &lt;a href="https://cio.economictimes.indiatimes.com/tag/data+scandal"&gt;data scandal&lt;/a&gt; involves the collection of personally identifiable information of up to 87 million Facebook users and almost certainly a much greater number that Cambridge Analytica began collecting in 2014.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/economic-times-may-2-2018-cyber-experts-say-playground-open-for-influencing-elections'&gt;https://cis-india.org/internet-governance/news/economic-times-may-2-2018-cyber-experts-say-playground-open-for-influencing-elections&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-03T03:17:33Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/ani-may-2-2018-data-usage-by-political-parties">
    <title>Data Usage by Political Parties</title>
    <link>https://cis-india.org/internet-governance/news/ani-may-2-2018-data-usage-by-political-parties</link>
    <description>
        &lt;b&gt;Sunil Abraham spoke to ANI regarding collection of data and its use by political parties for electoral gains.&lt;/b&gt;
        &lt;p style="text-align: center; "&gt;&lt;img src="https://cis-india.org/home-images/SunilAbraham.jpg/@@images/19202b15-4087-4780-bfcb-05045600c705.jpeg" alt="Sunil" class="image-inline" title="Sunil" /&gt;&lt;/p&gt;
&lt;blockquote class="twitter-tweet"&gt;
&lt;p dir="ltr"&gt;In law, if bio metrics, passwords &amp;amp; health info are collected, only then consent is needed. The more a political party has in its database, the more they can micro-target you. It'll be an influence: Sunil Abraham,cyber expert on date usage by political parties &lt;a href="https://twitter.com/hashtag/KarnatakaElections?src=hash&amp;amp;ref_src=twsrc%5Etfw"&gt;#KarnatakaElections&lt;/a&gt; &lt;a href="https://t.co/eUk478jJbB"&gt;pic.twitter.com/eUk478jJbB&lt;/a&gt;&lt;/p&gt;
— ANI (@ANI) &lt;a href="https://twitter.com/ANI/status/991329177973510149?ref_src=twsrc%5Etfw"&gt;1 May 2018&lt;/a&gt;&lt;/blockquote&gt;
&lt;hr /&gt;
&lt;p&gt;See the &lt;a class="external-link" href="https://twitter.com/ANI/status/991329177973510149?s=19"&gt;original here&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/ani-may-2-2018-data-usage-by-political-parties'&gt;https://cis-india.org/internet-governance/news/ani-may-2-2018-data-usage-by-political-parties&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-03T03:14:02Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/hindustan-times-april-30-2018-prasun-sonwalkar-vidhi-choudhury-now-twitter-too-caught-up-in-cambridge-analytica-controversy">
    <title>Now, Twitter too caught up in Cambridge Analytica controversy</title>
    <link>https://cis-india.org/internet-governance/news/hindustan-times-april-30-2018-prasun-sonwalkar-vidhi-choudhury-now-twitter-too-caught-up-in-cambridge-analytica-controversy</link>
    <description>
        &lt;b&gt;While Twitter does not share a break-up of users by region, the platform has less than 100 million users in India.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article by Prasun Sonwalkar and Vidhi Choudhury was published in the &lt;a class="external-link" href="https://www.hindustantimes.com/tech/now-twitter-too-caught-up-in-cambridge-analytica-controversy/story-3SMBniRitMG7Ne85AX86wL.html"&gt;Hindustan Times&lt;/a&gt; on April 30, 2018. Sunil Abraham was quoted.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;Social media company Twitter Inc sold data to the University of Cambridge academic Aleksandr Kogan who harvested millions of Facebook users’ information without their knowledge, it has emerged, although the company has clarified that no private data was accessed.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;It isn’t clear whether any of the data pertained to Indian users.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Twitter does not share a break-up of users by region, the platform has less than 100 million users in India.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Kogan, who created tools that allowed political consultancy Cambridge Analytica to psychologically profile and target voters, bought the data from the microblogging website in 2015, well before the recent scandal, involving use of the data of Facebook users, came to light.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;According to The Daily Telegraph, Kogan bought data on tweets, user names, photos, profiles and locations over a five-month period between December 2014 and April 2015 through his company Global Science Research (GSR). Twitter said it had banned GSR and Cambridge Analytica from buying data or running advertisements on the website and that no private data had been accessed, while Kogan insisted the data had only been used to create "brand reports" and "survey extender tools" and that he had not violated Twitter's policies.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The daily reported that Twitter charges companies and organisations for large data sets that are particularly useful for gleaning public opinion or receptiveness to certain topics and ideas, although Twitter bans companies from using the data to derive sensitive political information or matching it with personal information obtained elsewhere.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A Twitter spokesman confirmed the ban and said: "Twitter has also made the policy decision to off-board advertising from all accounts owned and operated by Cambridge Analytica. This decision is based on our determination that Cambridge Analytica operates using a business model that inherently conflicts with acceptable Twitter Ads business practices. "Cambridge Analytica may remain an organic user on our platform, in accordance with the Twitter Rules."&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The company said it does not allow "inferring or deriving sensitive information like race or political affiliation, or attempts to match a user's Twitter information with other personal identifiers" and that it had staff in place to police this "rigorously".&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Sunil Abraham, founder for think tank Centre for Internet and Society said: “Even though Twitter claims it has contracts in place and staff for contractual enforcement, I cannot understand how they will prevent those buying their data from inferring race and political affiliation. Especially in jurisdictions like ours without comprehensive data protection law.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A Cambridge Analytica spokesman said the company used Twitter for political advertising but insisted that it had never "undertaken a project with GSR focusing on Twitter data and Cambridge Analytica has never received Twitter data from GSR”.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Delhi-based lawyer Apar Gupta said, “Since we do not have a data protection law at present we are more or less dependent on the proactive disclosures by Twitter. Facebook is not a gold standard of upholding user rights and it is hoped that we soon have a regulator that can enforce such disclosures and place penalties.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;On 5 April, Facebook said user data of more than 560,000 Indians may have been harvested by British researcher Cambridge Analytica, at the centre of a recent storm over data breaches and potential privacy violations on the social media network.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“Twitter or Facebook are not alone in harvesting and storing user data. This is a widespread industry practice that relies on profiling. Such breaches and malpractices will continue to occur till we have a set of defined norms and enforceable penalties to protect user rights,” Gupta further added.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Only 335 users in India installed the thisisyourdigitallife app developed by academic Kogan and his company Global Science Research that may have been possibly at the centre of the data breaches, according to Facebook. The 335 people make up just 0.1% of the app’s total worldwide installs. Users agreed to take a personality test and have their data collected by the app, which then went on to also access information about the test-takers’ Facebook friends, leading to the accumulation of a much larger data pool.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Twitter Inc’s spokesperson said in an e-mail that an internal review conducted by it showed GSR had not accessed any private data.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“Unlike many other services, Twitter is public by its nature. People come to Twitter to speak publicly, and public Tweets are viewable and searchable by anyone. In 2015, Global Science Research (GSR) did have one-time API access to a random sample of public Tweets from a five-month period from December 2014 to April 2015,” the company statement added.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;This is basically information that users chose to make public.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/hindustan-times-april-30-2018-prasun-sonwalkar-vidhi-choudhury-now-twitter-too-caught-up-in-cambridge-analytica-controversy'&gt;https://cis-india.org/internet-governance/news/hindustan-times-april-30-2018-prasun-sonwalkar-vidhi-choudhury-now-twitter-too-caught-up-in-cambridge-analytica-controversy&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Social Media</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-02T02:49:25Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/new-indian-express-april-26-2018-aadhaar-data-over-89-lakh-mnrega-workers-in-andhra-pradesh-leaked-online">
    <title>Aadhaar data of over 89 lakh MNREGA workers in Andhra Pradesh leaked online</title>
    <link>https://cis-india.org/internet-governance/news/new-indian-express-april-26-2018-aadhaar-data-over-89-lakh-mnrega-workers-in-andhra-pradesh-leaked-online</link>
    <description>
        &lt;b&gt;Independent security researcher Kodali Srinivas tweeted screenshots of Aadhaar data of 89,38,138 MNREGA workers available on the Andhra Pradesh Benefit Disbursement Portal.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article was &lt;a class="external-link" href="http://www.newindianexpress.com/states/andhra-pradesh/2018/apr/26/aadhaar-data-of-over-89-lakh-mnrega-workers-in-andhra-pradesh-leaked-online-1806717.html"&gt;published in New Indian Express&lt;/a&gt; on April 27, 2018.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;Independent security researcher Kodali Srinivas, who exposed the leakage of Aadhaar and other personal data of 1.34 lakh beneficiaries on the State Housing Corporation website, on Thursday tweeted screenshots of Aadhaar data of 89,38,138 MNREGA workers availalbe on the Andhra Pradesh Benefit Disbursement Portal, which is maintained by APOnline, a joint venture between the Tata Consultancy Services (TCS) and the State government.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Hours after he blew the whistle, the website administrators began masking the data. In May 2017, Srinivas had co-authored a report for the Centre for Internet and Society, exposing how the Aadhaar data of 13.5 crore card holders was leaked online. The data was then leaked by four government portals, National Social Assistance Programme, National Rural Employment Guarantee Scheme, Chandranna Bima Scheme of the Government of Andhra Pradesh and Daily Online Payment Reports of NREGA of the Government of Andhra Pradesh.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;It appears that almost a year later, nothing much has changed. Srinivas told TNIE he had sent a mail to the chief operating officer, APOnline and Universal Identification Authority of India, the National Critical Information Infrastructure Protection Centre, and CERT-In, the Centre's cyber response wing. When contacted, Balasubramanyam, Joint Secretary (NREGS) told TNIE, "I have seen it. It is Benefit Disbursement Portal... not maintained by us. We have been very careful ever since that massive leak of data last year."&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Executive (operations), APOnline, S Chandramouleeswara Reddy refused comment saying that he was not the competent authority to speak on the issue. APOnline developed ICT solution for MGNREGA scheme, a framework involving Department of Posts, for disbursement of entitlements after accurate authentication of the entitlements through finger print authentication. TCS implements the ICT solution for MGNREGA in the State.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/new-indian-express-april-26-2018-aadhaar-data-over-89-lakh-mnrega-workers-in-andhra-pradesh-leaked-online'&gt;https://cis-india.org/internet-governance/news/new-indian-express-april-26-2018-aadhaar-data-over-89-lakh-mnrega-workers-in-andhra-pradesh-leaked-online&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Aadhaar</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-05T08:43:53Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/pai-wg-labor-and-economy-meeting">
    <title>PAI WG Labor and Economy Meeting</title>
    <link>https://cis-india.org/internet-governance/news/pai-wg-labor-and-economy-meeting</link>
    <description>
        &lt;b&gt;Elonnai Hickok co-chaired the first PAI Labor and Economy WG in NYC on April 25, 2018.&lt;/b&gt;
        &lt;p&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/files/pai-wg-labor-and-economy"&gt;Agenda&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/pai-wg-labor-and-economy-meeting'&gt;https://cis-india.org/internet-governance/news/pai-wg-labor-and-economy-meeting&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-05T09:35:07Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/asia-times-april-20-2018-aayush-rathi-sunil-abraham-what-s-up-with-whatsapp">
    <title>What’s up with WhatsApp?</title>
    <link>https://cis-india.org/internet-governance/blog/asia-times-april-20-2018-aayush-rathi-sunil-abraham-what-s-up-with-whatsapp</link>
    <description>
        &lt;b&gt;In 2016, WhatsApp Inc announced it was rolling out end-to-end encryption, but is the company doing what it claims to be doing?&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article by Aayush Rathi and Sunil Abraham was published in &lt;a class="external-link" href="http://www.atimes.com/article/whats-up-with-whatsapp/"&gt;Asia Times&lt;/a&gt; on April 20, 2018.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;Back in April 2016, when WhatsApp Inc announced it was rolling out end-to-end encryption (E2EE) for its billion-plus strong user base as a default setting, the messaging behemoth signaled to its users it was at the forefront of providing technological solutions to protect privacy.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;Emphasized in the security white paper explaining the implementation of the technology is the encryption of both forms of communication – one-to-one and group and also of all types of messages shared within such communications – text as well as media.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;Simply put, all communication taking place over WhatsApp would be decipherable only to the sender and recipient – it would be virtual gibberish even to WhatsApp.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;This announcement came in the backdrop of &lt;a href="https://www.theguardian.com/us-news/2016/feb/17/apple-ordered-to-hack-iphone-of-san-bernardino-shooter-for-fbi"&gt;Apple locking horns with the FBI&lt;/a&gt; after being asked to provide a backdoor to unlock the San Bernardino mass shooter’s iPhone. This further reinforced WhatsApp Inc’s stand on the ensuing debate between the interplay of privacy and security in the digital age.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;Kudos to WhatsApp, for there is &lt;a href="http://www.ohchr.org/EN/Issues/FreedomOpinion/Pages/CallForSubmission.aspx"&gt;growing discussion&lt;/a&gt; around how encryption and anonymity is central to enabling secure online communication which in turn is integral to essential human rights such as those of freedom of opinion and expression.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;WhatsApp may have taken encryption to the masses, but here we outline why WhatsApp’s provisioning of privacy and security measures needs a more granular analysis – is the company doing what it claims to be doing? Security issues with WhatsApp’s messaging protocol certainly are not new.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Man-in-the-middle attacks&lt;/h3&gt;
&lt;p class="p4" style="text-align: justify; "&gt;A &lt;a href="https://eprint.iacr.org/2017/713.pdf"&gt;study&lt;/a&gt; published by a group of German researchers from Ruhr University highlighted issues with WhatsApp’s implementation of its E2EE protocol to group communications. Another &lt;a href="https://courses.csail.mit.edu/6.857/2016/files/36.pdf"&gt;paper&lt;/a&gt; points out how WhatsApp’s session establishment strategy itself could be problematic and potentially be targeted for what are called man-in-the-middle (MITM) attacks.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;An MITM attack takes the form of a malicious actor, as the term suggests, placing itself between the communicating parties to eavesdrop or impersonate. The Electronic Frontier Foundation also &lt;a href="https://www.eff.org/deeplinks/2016/10/where-whatsapp-went-wrong-effs-four-biggest-security-concerns"&gt;highlighted&lt;/a&gt; other security vulnerabilities, or trade-offs, depending upon ideological inclinations, with respect to WhatsApp allowing for storage of unencrypted backups, issues with WhatsApp’s web client and also with its approach to cryptographic key change notifications.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;Much has been written questioning WhatsApp’s shifting approach to ensuring privacy too. Quoting straight from &lt;a href="https://www.whatsapp.com/legal/#privacy-policy-affiliated-companies"&gt;WhatsApp’s Privacy Policy:&lt;/a&gt; “We joined the Facebook family of companies in 2014. As part of the Facebook family of companies, WhatsApp receives information from, and shares information with, this family of companies.” Speaking of Facebook …&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;Culling out larger issues with WhatsApp’s privacy policies is not the intention here. What we specifically seek to explore is right at the nexus of WhatsApp’s security and privacy provisioning clashing with its marketing strategy: the storage of data on WhatsApp’s servers, or ‘blobs,’ as they are referred to in the technical paper. Facebook’s rather. In WhatsApp’s words: “Once your messages (including your chats, photos, videos, voice messages, files and share location information) are delivered, they are deleted from our servers. Your messages are stored on your own device.”&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;In fact, this non-storage of data on their ‘blobs’ is emphasizes at several other points on the official website. Let us call this the deletion-upon-delivery model.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;A simple experiment&lt;/h3&gt;
&lt;p class="p4" style="text-align: justify; "&gt;While drawing up a rigorous proof of concept, made near-impossible thanks to WhatsApp being a closed source messaging protocol, a simple experiment is enough to raise some very pertinent questions about WhatsApp’s outlined deletion-upon-delivery model. It should, however, be mentioned that the Signal Protocol developed by Open Whisper Systems and pivotal in WhatsApp’s rolling out of E2EE is &lt;a href="https://github.com/signalapp"&gt;open source&lt;/a&gt;. Here is how the experiment proceeds:&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;&lt;i&gt;Rick sends Morty an attachment.&lt;/i&gt;&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;&lt;i&gt;Morty then switches off the data on her mobile device.&lt;/i&gt;&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;&lt;i&gt;Rick downloads the attachment, an image.&lt;/i&gt;&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;&lt;i&gt;Subsequently, Rick deletes the image from his mobile device’s internal storage.&lt;/i&gt;&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;&lt;i&gt;Rick then logs into a WhatsApp’s web client on his browser. (Prior to this experiment, both Rick and Morty had logged out from all instances of the web client)&lt;/i&gt;&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;&lt;i&gt;Upon a fresh log-in to the web client and opening the chat with Morty, the option to download the image is available to Rick.&lt;/i&gt;&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;The experiment concludes with bewilderment at WhatsApp’s claim of deletion-upon-delivery as outlined earlier. The only place from which Morty could have downloaded the image would be from Facebook’s ‘blobs.’ The attachment could not have been retrieved from Morty’s mobile device as it had no way of sending data and neither from Rick’s mobile device as it no longer existed in the device’s storage.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;As per the Privacy Policy, the data is stored on the ‘blobs’ for a period of 30 days after transmission of a message only when it can’t be delivered to the recipient. Upon delivery, the deletion-upon-delivery model is supposed to kick in.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;Another straightforward experiment that leads to a similar conclusion is seeing the difference in time taken for a large attachment to be forwarded as opposed to when the same large attachment is uploaded. Forwarding is palpably quicker than uploading afresh: non-storage of attachments on the ‘blob’ would entail that the same amount should be taken for both.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;The plot thickens. WhatsApp’s Privacy Policy goes on to state: “To improve performance and deliver media messages more efficiently, such as when many people are sharing a popular photo or video, we may retain that content on our servers for a longer period of time.”  The technical paper offers no help in understanding how WhatsApp systems assess frequently shared encrypted media messages without decrypting it at its end.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;A possible explanation could be the usage of metadata by WhatsApp, which it discloses in its Privacy Policy while simultaneously being sufficiently vague about the specifics of it. That WhatsApp may be capable of reading encrypted communication through the inclusion of a backdoor bodes well for law enforcement, but not so much for unsuspecting users.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;The weakest link in the chain&lt;/h3&gt;
&lt;p class="p4" style="text-align: justify; "&gt;Concerns about backdoors in WhatsApp’s product have led the French government to start developing their &lt;a href="https://www.reuters.com/article/us-france-privacy/france-builds-whatsapp-rival-due-to-surveillance-risk-idUSKBN1HN258"&gt;own encrypted messaging service&lt;/a&gt;. This will be built using Matrix – an open protocol designed for real-time communication. Indeed, the Privacy Policy lays out that the company “may collect, use, preserve, and share your information if we have a good-faith belief that it is reasonably necessary to respond pursuant to applicable law or regulations, to legal process, or to government requests.”&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;The Signal Protocol is the undisputed gold standard of E2EE implementations. It is the integration with the surrounding functionality that WhatsApp offers which leads to vulnerabilities. After all, a chain is only as strong as its weakest link. Assuming that the attachments stored on the ‘blobs’ are in encrypted form, indecipherable to all but the intended recipients, this does not pose a privacy risk for the users from a technological point of view.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;However, it is easy lose sight of the fact that the Privacy Policy is a legally binding document and it specifically states that messages are not stored on the ‘blobs’ as a matter of routine. As a side note, WhatsApp’s Privacy Policy and Terms of Service are refreshing in their readability and lack of legalese.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;As we were putting the final touches to this piece, &lt;a href="https://wabetainfo.com/whatsapp-allows-to-redownload-deleted-media/#more-2781"&gt;news from &lt;i&gt;WABetaInfo&lt;/i&gt;&lt;/a&gt;, a well-reputed source of information on WhatsApp features, has broken that newer updates of WhatsApp for Android are permitting users to re-download media deleted up to three months back. WhatsApp cannot possibly achieve this without storing the media in the ‘blobs,’ or in other words, in violation of its Privacy Policy.&lt;/p&gt;
&lt;p class="p4" style="text-align: justify; "&gt;As the aphorism goes: “When the service is free, you are the product.”&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/asia-times-april-20-2018-aayush-rathi-sunil-abraham-what-s-up-with-whatsapp'&gt;https://cis-india.org/internet-governance/blog/asia-times-april-20-2018-aayush-rathi-sunil-abraham-what-s-up-with-whatsapp&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Aayush Rathi and Sunil Abraham</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Social Media</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Featured</dc:subject>
    
    
        <dc:subject>WhatsApp</dc:subject>
    
    
        <dc:subject>Homepage</dc:subject>
    

   <dc:date>2018-04-23T16:45:51Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/artificial-intelligence-in-governance-a-report-of-the-roundtable-held-in-new-delhi">
    <title>Artificial Intelligence in Governance: A Report of the Roundtable held in New Delhi</title>
    <link>https://cis-india.org/internet-governance/blog/artificial-intelligence-in-governance-a-report-of-the-roundtable-held-in-new-delhi</link>
    <description>
        &lt;b&gt;This Report provides an overview of the proceedings of the Roundtable on Artificial Intelligence (AI) in Governance, conducted at the Indian Islamic Cultural Centre, in New Delhi on March 16, 2018. The main purpose of the Roundtable was to discuss the deployment and implementation of AI in various aspects of governance within the Indian context. This report summarises the discussions on the development and implementation of AI in various aspects of governance in India. The event was attended by participants from academia, civil society, the legal sector, the finance sector, and the government.&lt;/b&gt;
        &lt;p&gt;&lt;span&gt;Event Report: &lt;/span&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/files/ai-in-governance"&gt;Download&lt;/a&gt;&lt;span&gt; (PDF)&lt;/span&gt;&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;This report provides a summary of the proceedings of the Roundtable on Artificial Intelligence (AI) in Governance (hereinafter referred to as ‘the Roundtable’). The Roundtable took place at the India Islamic Cultural Centre in New Delhi on March 16, 2018 and included participation  from academia, civil society, law, finance, and government. The main purpose of the Roundtable was to discuss the deployment and implementation of AI in various aspects of governance within the Indian context.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The Roundtable began with a presentation by Amber Sinha (Centre for Internet and Society - CIS) providing an overview of the CIS’s research objectives and findings thus far. During this presentation, he defined both AI and the scope of CIS’s research, outlining the areas of law enforcement, defense, education, judicial decision making, and the discharging of administrative functions as the main areas of concerns for the study. The presentation then outlined the key AI deployments and implementations that have been identified by the research in each of these areas. Lastly, the presentation raised some of the ethical and legal concerns related to this phenomenon.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;The presentation was followed by the Roundtable discussion that saw various topics in regards to the usages, challenges, ethical considerations and implications of AI in the sector being discussed. This report has identified a number of key themes of importance evident throughout these discussions.These themes include: (1) the meaning and scope of AI, (2) AI’s sectoral applications, (3) human involvement with automated decision making, (4) social and power relations surrounding AI, (5) regulatory approaches to AI and, (6) challenges to adopting AI. These themes in relation to the Roundtable are explored further below.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;Meaning and Scope of AI&lt;/span&gt;&lt;/h3&gt;
&lt;p&gt;&lt;span id="docs-internal-guid-7edcf822-2698-f1fd-35d3-0bcc913c986a"&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p dir="ltr" style="text-align: justify; "&gt;&lt;span&gt;One of the first tasks recommended by the group of participants was to define the meaning and scope of AI and the way those terms are used and adopted today. These concerns included the need to establish a distinction between the use of algorithms, machine learning, automation and artificial intelligence. Several participants believed that establishing consensus around these terms was essential before proceeding towards a stage of developing regulatory frameworks around them.&lt;/span&gt;&lt;/p&gt;
&lt;p dir="ltr" style="text-align: justify; "&gt;&lt;span&gt;The general fact agreed to was that AI as we understand it does not necessarily extend to complete independence in terms of automated decision making but it refers instead to the varying levels of machine learning (ML), and the automation of certain processes that has already been achieved. Several concerns that emerged during the course of the discussion centred around the question of autonomy and transparency in the process of ML and algorithmic processing. Stakeholders recommended that over and above the debates of humans in the loop [1] on the loop [2] and out of the loop, [3] there were several other gaps with respect to AI and its usage in the industry today which also need to be considered before building a roadmap for future usage. Key issues like information asymmetries, communication lags, a lack of transparency, the increased mystification of the coding process and the centralization of power all needed to be examined and analysed under the rubric of developing regulatory frameworks.&lt;/span&gt;&lt;/p&gt;
&lt;p dir="ltr" style="text-align: justify; "&gt;&lt;span&gt;Takeaway Point: The group brought out the need for standardization of terminology as well as the establishment of globally replicable standards surrounding the usage, control and proliferation of AI. The discussion also brought up the problems with universal applicability of norms. One of the participants brought up an issue regarding the lack of normative frameworks around the usage and proliferation of AI. Another participant responded to the concern by alluding to the Asilomar AI principles.[4] The Asilomar AI principles are a set of 23 principles aimed at directing and shaping AI research in the future. The discussion brought out further issues regarding the enforceability as well universal applicability of the principles and their global relevance as well. Participants recommended the development of a shorter, more universally applicable regulatory framework that could address various contextual limitations as well.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;AI Sectoral Applications&lt;/span&gt;&lt;/h3&gt;
&lt;p&gt;&lt;span&gt;Participants mentioned a number of both current and potential applications of AI technologies, referencing the defence sector, the financial sector, and the agriculture sector. There are several developments taking place on the Indian military front with the Committee on AI and National Security being established by the Ministry of Defence. Through the course of the discussion it was also stated that the Indian Armed Forces were very interested in the possibilities of using AI for their own strategic and tactical purposes. From a technological standpoint, however, there has been limited progress in India in researching and developing AI. &lt;/span&gt;&lt;/p&gt;
&lt;p&gt;&lt;span&gt;While India does deploy some Unmanned Aerial Vehicles (UAVs), they are mostly bought from Israel, and often are not autonomous. It was also pointed out that contrary to reportage in the media, the defence establishment in India is extremely cautious about the adoption of autonomous weapons systems, and that the autonomous technology being rolled out by the CAIR is not yet considered trustworthy enough for deployment.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Discussions further revealed that the few technologies that have a relative degree of autonomy are primarily loitering ammunitions and are used to target radar insulations for reconnaissance purposes. One participant mentioned that while most militaries are interested in deploying AI, it is primarily from an Intelligence, Surveillance and Reconnaissance (ISR) perspective. The only exception to this generalization is China where the military ethos and command structure would work better with increased reliance on independent AI systems. One major AI system rolled out by the US is Project Maven which is primarily an ISR system. The aim of using these systems is to improve decision making and enhance data analysis particularly since battlefields generate a lot of data that isn’t used anywhere.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Another sector discussed was the securities market where algorithms were used from an analytical and data collection perspective. A participant referred to the fact that machine learning was being used for processes like credit and trade scoring -- all with humans on the loop. The participant further suggested that while trade scoring was increasingly automated, the overall predictive nature of such technologies remained within a self limiting capacity wherein statistical models, collected data and pattern analysis were used to predict future trends. The participant questioned whether these algorithms could be considered as AI in the truest sense of the term since they primarily performed statistical functions and data analysis.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;One participant also recommended the application of AI to sectors like agriculture with the intention of gradually acclimatizing users to the technology itself. Respondents also stated that while AI technologies were being used in the agricultural space it was primarily from the standpoint of data collection and analysis as opposed to predictive methods. It was mentioned that a challenge to the broad adoption of AI in this sector is that the core problems of adopting AI as a methodology – namely information asymmetries, excessive data collection, limited control/centralization and the obfuscatory nature of code – would not be addressed/modified. Lastly, participants also suggested that within the Indian framework not much was being done aside from addressing farmers’ queries and analysing the data from those concerns.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Takeaway Point: The discussion drew attention to the various sectors where AI was currently being used -- such as the military space, agricultural development and the securities market -- as well as potential spaces of application -- such as healthcare and manual scavenging. The key challenges that emerged were information asymmetries with respect to the usage of these technologies as well as limited capacity in terms of technological advancement.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;Human Involvement with Automated Decision Making&lt;/span&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Large parts of discussions throughout the Roundtable event were preoccupied with automated decision making and specifically, the involvement of humans (human on and in the loop) or lack thereof (human out of the loop) in this process. These discussions often took place with considerations of AI for prescriptive and descriptive uses.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Participants expressed that human involvement was not needed when AI was being used for descriptive uses, such as determining relationships between various variables in large data sets. Many agreed to the superior ability of ML and similar AI technologies in describing large and unorganized datasets. It was the prescriptive uses of AI where participants saw the need for human involvement, with many questioning the technology making more important decisions by itself.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;The need for human involvement in automated decision making was further justified by references to various instances of algorithmic bias in the American context. One participant, for example, brought up the use of algorithmic decision making by a school board in the United States for human resource practices (hirings, firing, etc.) based on the standardized test scores of students. In this instance, such practices resulted in the termination of teachers primarily from low income neighbourhoods.[5] The main challenge participants identified in regards to human on the loop automated decision making is the issue of capacity, as significant training would have to be achieved for sectors to have employees actively involved in the automated decision making workflow.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;An example in the context of the healthcare field was brought up by one participant arguing for human in the loop in regards to prescriptive scenarios. The participant suggested that AI technology, when given x-ray or MRI data for example, should only be limited to pointing out the correlations of diseases with patients’ scans/x-rays. Analysis of such correlations should be reserved for the medical expertise of doctors who would then determine if any instances of causality can be identified from this data and if it’s appropriate for diagnosing patients.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;It was emphasized that, despite a preference for human on/in the loop in regards to automated decision making, there is a need to be cognisant of techno-solutionism due to the human tendency of over reliance on technology when making decisions. A need for command and control structures and protocols was emphasized for various governance sectors in order to avoid potentially disastrous results through a checks and balances system. It was noted that the defence sector has already developed such protocols, having established a chain of command due to its long history of algorithmic decision making (e.g. the Aegis Combat System being used by the US Navy in the 1980s).&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;One key reason why militaries prefer human in and on the loop systems as opposed to out of the loop systems is because of the protocol associated with human action on the battlefield. International Humanitarian Law has clear indicators of what constitutes a war crime and who is to be held responsible in the scenario but developing such a framework with AI systems would be challenging as it would be difficult to determine which party ought to be held accountable in the case of a transgression or a mistake.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Takeaway Point: It was reiterated by many participants that neither AI technology nor India’s regulatory framework is at a point where AI can be trusted to make significant decisions alone -- especially when such decisions are evaluating humans directly. It was recommended that human out of the loop decision making should be reserved for descriptive practices whereas human on and in the loop decision making should be used for prescriptive practices. Lastly, it was also suggested that appropriate protocols be put in place to direct those involved in the automated decision making workflow, particularly when the process involves judgements and complex decision making in sectors such as jurisprudence and the military.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;The Social and Power Relations Surrounding AI&lt;/span&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Some participants emphasized the need to contextualize discussions of AI and governance within larger themes of poverty, global capital and power/social relations. Their concerns were that the use of AI technologies would only create and reinforce existing power structures and should instead be utilized towards ameliorating such issues. Manual scavenging, for example, was identified as an area where AI could be used to good effect if coupled with larger socio-political policy changes. There are several hierarchies that could potentially be reinforced through this process and all these failings needed to be examined thoroughly before such a system was adopted and incorporated within the real world.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Furthermore the discussion also revealed that the objectivity attributed to AI and ML tends to gloss over the fact that there are nonetheless implicit biases that exist in the minds of the creators that might work themselves into the code. Fears regarding technology recreating a more exclusionary system were not entirely unfounded as participants pointed out the fact that the knowledge base of the user would determine whether technology was used as a tool of centralization or democratization.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;One participant also questioned the concept of governance itself, contrasting the Indian government’s usage of the term in the 1950s (as it appears in the Directive Principle) with that of the World Bank in the 1990s.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Some participants emphasized the need to contextualize discussions of AI and governance within larger themes of poverty, global capital and power/social relations. Their concerns were that the use of AI technologies would only create and reinforce existing power structures and should instead be utilized towards ameliorating such issues. Manual scavenging, for example, was identified as an area where AI could be used to good effect if coupled with larger socio-political policy changes. There are several hierarchies that could potentially be reinforced through this process and all these failings needed to be examined thoroughly before such a system was adopted and incorporated within the real world.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Furthermore the discussion also revealed that the objectivity attributed to AI and ML tends to gloss over the fact that there are nonetheless implicit biases that exist in the minds of the creators that might work themselves into the code. Fears regarding technology recreating a more exclusionary system were not entirely unfounded as participants pointed out the fact that the knowledge base of the user would determine whether technology was used as a tool of centralization or democratization. &lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;One participant also questioned the concept of governance itself, contrasting the Indian government’s usage of the term in the 1950s (as it appears in the Directive Principle) with that of the World Bank in the 1990s. &lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Takeaway Point: Discussions of the implementation and deployment of AI within the governance landscape should attempt to take into consideration larger power relations and concepts of equity.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;Regulatory Approaches to AI&lt;/span&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Many recognized the need for AI-specific regulations across Indian sectors, including governance. These regulations, participants stated, should draw from notions of accountability, algorithmic transparency and efficiency. Furthermore, it was also stated that such regulations should consider the variations across the different legs of the governance sector, especially in regards to defence. One participant, pointing to the larger trends towards automation, recommended the establishment of certain fundamental guidelines aimed at directing the applicability of AI in general. The participant drew attention to the need for a robust evaluation system for various sectors (the criminal justice system, the securities market, etc.) as a way of providing checks on algorithmic biases. Another emphasized the need for regulations for better quality data so as to ensure machine readability and processability for various AI systems.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Another key point that emerged was the importance of examining how specific algorithms performed processes like identification or detection. A participant recommended the need to examine the ways in which machines identify humans and what categories/biases could infiltrate machine-judgement. They reiterated that if a new element was introduced in the system, the pre-existing variables would be impacted as well. The participant further recommended that it would be useful to look at these systems in terms of the couplings that get created in order to determine what kinds of relations are fostered within that system.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;The roundtable saw some debate regarding the most appropriate approach to developing such regulations. Some participants argued for a harms-based approach, particularly in regards to determining if regulations are needed all together for specific sectors (as opposed to guidelines, best practices, etc.). The need to be cognisant of both individual and structural harms was emphasized, mindful of the possibility of algorithmic biases affecting traditionally marginalized groups.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Others only saw value in a harms based approach insomuch that it could help outline the appropriate penalties in an event of regulations being violated, arguing instead for a rights-based approach as it enabled greater room for technological changes. An approach that kept in mind emerging AI technologies was reiterated by a number of participants as being crucial to any regulatory framework. The need for a regulatory space that allowed for technological experimentation without the fear of constitutional violation was also communicated.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Takeaway Point: The need for an AI-specific regulatory framework cognisant of differentiations across sectors in India was emphasized. There is some debate about the most appropriate approach for such a framework, a harms-based approach being identified by many as providing the best perspective on regulatory need and penalties. Some identified the rights-based approach as providing the most flexibility for a rapidly evolving technological landscape.&lt;/span&gt;&lt;/p&gt;
&lt;h3&gt;&lt;span&gt;Challenges to Adopting AI&lt;/span&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt;Out of all the concerns regarding the adoption of algorithms, ML and AI, the two key points of resistance that emerged, centred around issues of accountability and transparency. Participants suggested that within an AI system, predictability would be a key concern, and in the absence of predictable outcomes, establishing redressal mechanisms would pose key challenges as well.&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;span&gt; &lt;/span&gt;&lt;/p&gt;
&lt;p id="_mcePaste"&gt;A discussion was also initiated regarding the problems involved in attributing responsibility within the AI chain as well as the need to demystify the process of using AI in daily life. While reiterating the current landscape, participants spoke about how the usage of AI is currently limited to the automation of certain tasks and processes in certain sectors where algorithmic processing is primarily used as a tool of data collection and analysis as opposed to an independent decision making tool.&lt;/p&gt;
&lt;div id="_mcePaste"&gt;&lt;/div&gt;
&lt;p id="_mcePaste"&gt;One of the suggestions and thought points that emerged during the discussion was whether a gradual adoption of AI on a sectoral basis might be more beneficial as it would provide breathing room in the middle to test the system and establish trust between the developers, providers, and consumers. This prompted a debate about the controllers and the consumers of AI and how the gap between the two would need to be negotiated. The debate also brought up larger concerns regarding the mystification of AI as a process itself and the complications of translating the code into communicable points of intervention.&lt;/p&gt;
&lt;div id="_mcePaste"&gt;&lt;/div&gt;
&lt;p id="_mcePaste"&gt;Another major issue that emerged was the question of attribution of responsibility in the case of mistakes. In the legal process as it currently exists, human imperfections notwithstanding, it would be possible to attribute the blame for decisions taken to certain actants undertaking the action. Similarly in the defence sector, it would be possible to trace the chain of command and identify key points of failure, but in the case of AI based judgements, it would be difficult to place responsibility or blame. This observation led to a debate regarding accountability in the AI chain. It was inconclusive whether the error should be attributed to the developer, the distributor or the consumer.&lt;/p&gt;
&lt;div id="_mcePaste"&gt;&lt;/div&gt;
&lt;p id="_mcePaste" style="text-align: justify; "&gt;A suggestion that was offered in order to counter the information asymmetry as well as reduce the mystification of computational method was to make the algorithm and its processes transparent. This sparked a debate, however, as participants stated that while such a state of transparency ought to be sought after and aspired towards, it would be accompanied by certain threats to the system. A key challenge that was pointed out was the fact that if the algorithm was made transparent, and its details were shared, there would be several ways to manipulate it, translate it and misuse it.&lt;/p&gt;
&lt;div id="_mcePaste"&gt;&lt;/div&gt;
&lt;p id="_mcePaste" style="text-align: justify; "&gt;Another question that emerged was the distribution of AI technologies and the centralization of the proliferation process particularly in terms of service provision. One participant suggested that given the limited nature of research being undertaken and the paucity of resources, a limited number of companies would end up holding the best tech, the best resources and the best people. They further suggested that these technologies might end up being rolled out as a service on a contractual basis. In which case it would be important to track how the service was being controlled and delivered. Models of transference would become central points of negotiation with alternations between procurement based, lease based, and ownership based models of service delivery. Participants suggested that this was going to be a key factor in determining how to approach these issues from a legal and policy standpoint.&lt;/p&gt;
&lt;div&gt;&lt;/div&gt;
&lt;p style="text-align: justify; "&gt;A discussion was also initiated regarding the problems involved in attributing responsibility within the AI chain as well as the need to demystify the process of using AI in daily life. While reiterating the current landscape, participants spoke about how the usage of AI is currently limited to the automation of certain tasks and processes in certain sectors where algorithmic processing is primarily used as a tool of data collection and analysis as opposed to an independent decision making tool.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;One of the suggestions and thought points that emerged during the discussion was whether a gradual adoption of AI on a sectoral basis might be more beneficial as it would provide breathing room in the middle to test the system and establish trust between the developers, providers, and consumers. This prompted a debate about the controllers and the consumers of AI and how the gap between the two would need to be negotiated. The debate also brought up larger concerns regarding the mystification of AI as a process itself and the complications of translating the code into communicable points of intervention.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Another major issue that emerged was the question of attribution of responsibility in the case of mistakes. In the legal process as it currently exists, human imperfections notwithstanding, it would be possible to attribute the blame for decisions taken to certain actants undertaking the action. Similarly in the defence sector, it would be possible to trace the chain of command and identify key points of failure, but in the case of AI based judgements, it would be difficult to place responsibility or blame. This observation led to a debate regarding accountability in the AI chain. It was inconclusive whether the error should be attributed to the developer, the distributor or the consumer.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A suggestion that was offered in order to counter the information asymmetry as well as reduce the mystification of computational method was to make the algorithm and its processes transparent. This sparked a debate, however, as participants stated that while such a state of transparency ought to be sought after and aspired towards, it would be accompanied by certain threats to the system. A key challenge that was pointed out was the fact that if the algorithm was made transparent, and its details were shared, there would be several ways to manipulate it, translate it and misuse it.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Another question that emerged was the distribution of AI technologies and the centralization of the proliferation process particularly in terms of service provision. One participant suggested that given the limited nature of research being undertaken and the paucity of resources, a limited number of companies would end up holding the best tech, the best resources and the best people. They further suggested that these technologies might end up being rolled out as a service on a contractual basis. In which case it would be important to track how the service was being controlled and delivered. Models of transference would become central points of negotiation with alternations between procurement based, lease based, and ownership based models of service delivery. Participants suggested that this was going to be a key factor in determining how to approach these issues from a legal and policy standpoint.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Takeaway Point: The two key points of resistance that emerged during the course of discussion were accountability and transparency. Participants pointed out the various challenges involved in attributing blame within the AI chain and they also spoke about the complexities of opening up AI code, thereby leaving it vulnerable to manipulation. Certain other challenges that were briefly touched upon were the information asymmetry, excessive data collection, centralization of power in the hands of the controllers and complicated service distribution models.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Conclusion&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The Roundtable provided some insight into larger debates regarding the deployment and applications of AI in the governance sector of India. The need for a regulatory framework as well as globally replicable standards surrounding AI was emphasized, particularly one mindful of the particular needs of differing fields of the governance sector (especially defence). Furthermore, a need for human on/in the loop practices with regards to automated decision making was highlighted for prescriptive instances, particularly when such decisions are responsible for directly evaluating humans. Contextualising AI within its sociopolitical parameters was another key recommendation as it would help filter out the biases that might work themselves into the code and affect the performance of the algorithm. Further, it is necessary to see the involvement and influence of the private sector in the deployment of AI for governance, it often translating into the delivery of technological services from private actors to public bodies towards discharge of public functions. This has clear implications for requirements of transparency  and procedural fairness even in private sector delivery of these services. Defining the meaning and scope of AI while working to demystify algorithms themselves would serve to strengthen regulatory frameworks as well as make AI more accessible for the user / consumer.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;[1]. Automated decision making model where final decisions are made by a human operator&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;[2]. Automated decision making model where decisions can be made without human involvement but a human can override the system.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;[3]. A completely autonomous decision making model requiring no human involvement&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;[4]. https://futureoflife.org/ai-principles/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;[5]. The participant was drawing this example from Cathy O’Neil’s Weapons of Math Destruction, (Penguin,2016), at 4-13.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/artificial-intelligence-in-governance-a-report-of-the-roundtable-held-in-new-delhi'&gt;https://cis-india.org/internet-governance/blog/artificial-intelligence-in-governance-a-report-of-the-roundtable-held-in-new-delhi&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Saman Goudarzi and Natallia Khaniejo</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-05-03T15:49:40Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/government-giving-free-publicity-worth-40-k-to-twitter-and-facebook">
    <title>Government gives free publicity worth 40k to Twitter and Facebook </title>
    <link>https://cis-india.org/internet-governance/blog/government-giving-free-publicity-worth-40-k-to-twitter-and-facebook</link>
    <description>
        &lt;b&gt;We conducted a 2 week survey of newspapers for links between government advertisement to social media giants. As citizens, we should be worried about the close nexus between the Indian government and digital behemoths such as Facebook, Google and Twitter. It has become apparent to us after a 2 week print media analysis that our Government has been providing free publicity worth Rs 40,000 to these entities. There are multiple issues with this as this article attempts at pointing out.&lt;/b&gt;
        
&lt;p style="text-align: justify;"&gt;&lt;img src="https://cis-india.org/home-images/TotalAdvertisementExpenditure.jpg" alt="null" class="image-inline" title="Total Advertisement Expenditure" /&gt;&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;We analyzed 5 English language newspapers daily for 2 weeks from March 12&lt;sup&gt;th&lt;/sup&gt; to 26&lt;sup&gt;th&lt;/sup&gt;, one week of the newspapers in Lucknow and the second week in Bangalore. Facebook, Twitter, Instagram and Alphabet backed services such as Youtube and Google Plus were part of our survey. Of a total of 33 advertisements (14 in Lucknow+19 in Bangalore), Twitter stands out as the most prominent advertising platform used by government agencies with 30 ads but Facebook at 29 was more expensive. In order to ascertain the rates of publicity, we used current advertisement rates for the Times of India, as our purpose was solely to give a rough estimation of how much the government is spending.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;Advertising of this nature is not merely an inherent problem of favoring some social media companies over others but also symptomatic of a bigger problem, the lack of our native e-governance mechanisms which cause the Government to rely and promote others. Where we do have guidelines they are not being followed. By outsourcing their e-governance platforms to Twitter such as TwitterSeva, a feature created by the Twitter India team to help citizens connect better with government services, there is less of an impetus to construct better &lt;a class="external-link" href="https://factordaily.com/twitter-helping-india-reboot-public-services-publicly/"&gt;websites of their own&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;If this is so because we currently do not have the capacity to build them ourselves then it is imperative that this changes. We should either be executing government functions on digital infrastructure owned by them or on open and interoperable systems. If anything, the surveyed social media platforms can be used to enhance pre-existing facilities. However, currently the converse is true with these platforms overshadowing the presence of e-governance websites. Officials have started responding to complaints on Twitter, diluting the significance of such complaint mechanisms on their respective department’s portal. Often enough such features are not available on the relevant government website. This sets a dangerous precedent for a citizen management system as the records of such interactions are then in the hands of these companies who may not exist in the future. As a result, they can control the access to such records or worse tamper with them. Posterity and reliability of such data can be ensured only if they are stored within the Government’s reach or if they are open and public with a first copy stored on Government records which ensures transparency as well. Data portability is an important facet to this issue as well as being a right consumers should possess. It provides for support of many devices, transition to alternative technologies and lastly, makes sure that all the data like other public records will be available upon request through the Right to Information procedure. The last is vital to uphold the spirit of transparency envisioned through the RTI process since interactions of government with citizens are then under its ambit and available for disclosure for whomsoever concerned.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;Secondly, such practices by the Government are enhancing the monopoly of the companies in the market effectively discouraging competition and eventually, innovation. While a certain elite strata of the population might opt for Twitter or Facebook as their mode of conveying grievance, this may not hold true for the rest of the online India population.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;Picking players in a free market is in violation of technology and vendor neutrality, a practice essential in e-governance to provide a level playing field for all and competing technologies. Projecting only a few platforms as de facto mediums of communication with the government inhibits the freedom of choice of citizens to air their grievances through a vendor or technology they are comfortable with. At the same time it makes the Government a mouthpiece for such companies who are gaining free publicity and consolidating their popularity. Government apps such as the SwachBharat one which is an e-governance platform do not offer much more in terms of functionality but either reflect the website or are a less mature version of the same. This leads to the problem of fracturing with many avenues of complaining such as the website, app, Twitter etc. Consequently, the priority of the people dealing with the complaints in terms of platform of response is unsure. Will I be responded to sooner if I tweet a complaint as opposed to putting it up on the app? Having an interoperable system can solve this where the Government can have a dashboard of their various complaints and responses are then made out evenly. Twitter itself could implement this by having complaints from Facebook for example and then the Twitter Seva would be an equal platform as opposed to the current issue where only they are favored.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;Recent events have illustrated how detrimental the storage of data by these giants can be in terms of privacy. Data security concerns are also a consequence of such leaks. Not only is this a long overdue call for a better data protection law but at the same time also for the Government to realize that these platforms cannot be trusted. The hiring of Cambridge Analytica to influence voters in the US elections, based on their Facebook profiles and ancillary data, effectively put the governance of the country on sale by exploiting these privacy and security issues. By basing e-governance on their backbone, India is not far from inviting trouble as well. It is unnecessary and dangerous to have a go-between for matters that pertain between an individual and state.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;As this article was being written, it was confirmed by the Election Commission that they are partnering with Facebook for the Karnataka Assembly Elections to promote activities such as encouraging Voter ID enrollment and voter participation. Initiatives like these, tying the government even closer to these companies, are of concern and cement the latter’s stronghold.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;&lt;em&gt;Note: Our survey data and results are attached to this post. All research was collected by Shradha Nigam, a Vth year student at NLSIU, Bangalore.&lt;/em&gt;&lt;/p&gt;
&lt;hr /&gt;
&lt;h3 style="text-align: justify;"&gt;Survey Data and Results&lt;/h3&gt;
&lt;p style="text-align: justify;"&gt;This report is based on a survey of government advertisements in English language newspapers in relation to their use of social media platforms and dedicated websites (“&lt;strong&gt;Survey&lt;/strong&gt;”). For the purpose of this report, the ambit of the social media platforms has been limited to the use of Facebook, Twitter, YouTube, Google Plus and Instagram. The report was prepared by Shradha Nigam, a student from National Law School of India University, Bangalore. &lt;a class="external-link" href="http://cis-india.org/internet-governance/files/cis-report-on-social-media"&gt;Read the full report here&lt;/a&gt;.&lt;/p&gt;

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/government-giving-free-publicity-worth-40-k-to-twitter-and-facebook'&gt;https://cis-india.org/internet-governance/blog/government-giving-free-publicity-worth-40-k-to-twitter-and-facebook&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Akriti Bopanna</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Google</dc:subject>
    
    
        <dc:subject>Instagram</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    
    
        <dc:subject>Twitter</dc:subject>
    
    
        <dc:subject>YouTube</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Featured</dc:subject>
    
    
        <dc:subject>Google Plus</dc:subject>
    
    
        <dc:subject>Facebook</dc:subject>
    
    
        <dc:subject>Homepage</dc:subject>
    

   <dc:date>2018-04-27T09:52:26Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>




</rdf:RDF>
